github.com/sanprasirt/go@v0.0.0-20170607001320-a027466e4b6d/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used
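// rewriteValueAMD64 dispatches v to the rewrite-rule functions generated for
// its opcode and reports whether any rule rewrote v in place. Opcodes with
// many rules are split across several _0/_10/_20... functions, chained with ||.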
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
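// Each rule function below follows the same generated shape: the // match,
// // cond, and // result comments restate the rule from gen/AMD64.rules, and
// each "for { ... }" block is a single attempt at that rule. The block breaks
// out on the first failed structural or condition check, or else rewrites v
// in place (v.reset, v.AuxInt, v.Aux, v.AddArg) and returns true. The first
// rule here, for example, folds a constant operand into the instruction:
// (ADDL x (MOVLconst [c])) becomes (ADDLconst [c] x).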
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
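	// The next six rules recognize an add of complementary constant shifts
	// of the same value as a rotate: x<<c + x>>(32-c) is ROLLconst [c].
	// The 16- and 8-bit variants also check c < 16 (or c < 8) and the
	// value's type size, since the shifts themselves operate on 32-bit
	// registers.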
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
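// The load-merging rules below fold a memory operand into the arithmetic
// instruction: (ADDL x (MOVLload ...)) becomes ADDLmem, a single
// read-modify instruction instead of a separate load and add. canMergeLoad
// reports whether it is safe to fold the load l into v (the load must have
// no other uses and merging must not reorder it past conflicting memory
// operations), and clobber invalidates l so dead-code elimination can
// remove it; clobber always returns true and exists only for its side
// effect inside the condition.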
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
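	// The rules below strength-reduce add/shift combinations into LEAQ
	// addressing forms: LEAQ1, LEAQ2, LEAQ4, and LEAQ8 compute x + 1*y,
	// x + 2*y, x + 4*y, and x + 8*y in a single instruction, so for
	// example (ADDQ x (SHLQconst [3] y)) becomes (LEAQ8 x y).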
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
	// match: (ANDL x (MOVLconst [c]))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond:
	// result: (ANDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
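// Constant-mask rules: an AND with 0xFF or 0xFFFF (and 0xFFFFFFFF in the
// 64-bit variant below) is rewritten to the corresponding zero-extension
// op, while an all-zeros mask folds to a zero constant and an all-ones
// mask folds to the operand itself.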
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// cond:
	// result: (ANDLconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: int32(c)==0
	// result: (MOVLconst [0])
	for {
		c := v.AuxInt
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: int32(c)==-1
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
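// BSFQ (bit scan forward) leaves its result undefined when the input is
// zero, so lowered code for small-width trailing-zero counts ORs a sentinel
// bit (1<<8 or 1<<16) above a zero-extended 8- or 16-bit input to keep it
// nonzero. Once the sentinel is present, the zero-extension underneath is
// redundant and the rules below remove it. The CMOVQEQ rule further down
// relies on the same fact: a nonzero ORQconst feeding BSFQ means the
// zero flag can never be set, so the equal-branch of the conditional move
// is dead and the whole CMOVQEQ reduces to its first operand.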
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ANDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ANDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ANDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [c & d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = c & d
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// cond:
	// result: (MOVBQZX x)
	for {
		if v.AuxInt != 0xFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// cond:
	// result: (MOVWQZX x)
	for {
		if v.AuxInt != 0xFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFFFFFF] x)
	// cond:
	// result: (MOVLQZX x)
	for {
		if v.AuxInt != 0xFFFFFFFF {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ANDQconst [-1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c&d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c & d
		return true
	}
	return false
}
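// x86's BSF leaves its destination undefined when the source is zero, so
// the lowering of the small-width count-trailing-zeros operations ORs a
// sentinel bit just above the meaningful bits (1<<8 for bytes, 1<<16 for
// words) before the BSFQ; a zero input then yields 8 or 16, the correct
// answer for that width. Once the sentinel bit is set, bits above it can
// no longer influence the BSF result, so the zero-extension of the input
// is redundant, which is what the two rules below drop.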
func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 8
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// cond:
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if v_0.AuxInt != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = 1 << 16
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
	// match: (BTQconst [c] x)
	// cond: c < 32
	// result: (BTLconst [c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	for {
		_ = v.Args[2]
		x := v.Args[0]
		v_2 := v.Args[2]
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := v_2_0_0.AuxInt
		if !(c != 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int64(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
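// A comparison whose outcome is known at compile time is replaced by one
// of the Flag* pseudo-ops. Each one encodes both the signed and the
// unsigned verdict (e.g. FlagLT_UGT: signed less-than but unsigned
// greater-than), since the same CMP flags feed both kinds of conditional.
// Comparisons against zero are instead lowered to the TEST forms, which
// need no immediate operand.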
func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==int8(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) == int8(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < int8(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int8(m) && int8(m) < int8(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (ANDL x y) [0])
	// cond:
	// result: (TESTB x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPBconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTBconst [int64(int8(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int64(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// cond:
	// result: (TESTB x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTB)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPL x (MOVLconst [c]))
	// cond:
	// result: (CMPLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)==int32(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) == int32(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int32(m) && int32(m) < int32(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int32(m) && int32(m) < int32(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDL x y) [0])
	// cond:
	// result: (TESTL x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPLconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTLconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// cond:
	// result: (TESTL x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTL)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
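// The first two rules below decide a comparison against 32 at compile
// time by range analysis: for any x, x&15 is in [0,15], so the matched
// expression -((x&15)-16) = 16-(x&15) lies in [1,16], and likewise
// 8-(x&7) lies in [1,8]; both are therefore always unsigned-less-than 32.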
func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
	// cond:
	// result: (FlagLT_ULT)
	for {
		if v.AuxInt != 32 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0.AuxInt != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := v_0.AuxInt
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		if !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		if !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVLQZX _) [c])
	// cond: 0xFFFFFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		if !(0xFFFFFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := v_0.AuxInt
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQ x y) [0])
	// cond:
	// result: (TESTQ x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPQconst (ANDQconst [c] x) [0])
	// cond:
	// result: (TESTQconst [c] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// cond:
	// result: (TESTQ x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTQ)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
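// As with CMPB earlier, word-sized compares keep the constant in
// canonical AuxInt form: int64(int16(c)) truncates c to 16 bits and
// sign-extends it back to 64.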
func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (CMPW x (MOVLconst [c]))
	// cond:
	// result: (CMPWconst x [int64(int16(c))])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// cond:
	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int64(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==int16(y)
	// result: (FlagEQ)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) == int16(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := v_0.AuxInt
		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < int16(n)
	// result: (FlagLT_ULT)
	for {
		n := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := v_0.AuxInt
		if !(0 <= int16(m) && int16(m) < int16(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (ANDL x y) [0])
	// cond:
	// result: (TESTW x y)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (CMPWconst (ANDLconst [c] x) [0])
	// cond:
	// result: (TESTWconst [int64(int16(c))] x)
	for {
		if v.AuxInt != 0 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int64(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// cond:
	// result: (TESTW x x)
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64TESTW)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	return false
}
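// For the locked compare-and-exchange ops only address folding is done:
// an ADDQconst feeding the pointer is absorbed into the instruction's
// displacement, guarded by is32Bit since x86 displacements are signed
// 32-bit values.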
func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(off1+off2)
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
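// LEAQ absorbs nested address arithmetic: constant offsets and symbols
// from an inner LEAQ* are merged into the outer one (canMergeSym
// permitting), and a plain ADDQ of two registers becomes the two-operand
// form LEAQ1, provided neither operand is OpSB, the static data base,
// which cannot serve as an index register.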
func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
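// LEAQ1/LEAQ2/LEAQ4/LEAQ8 model the scaled-index addressing modes
// off(base)(idx*1/2/4/8). The rules below fold constant adds into the
// displacement (note that a constant folded out of the *index* of a
// scaled form is multiplied by the scale, e.g. c+2*d for LEAQ2) and
// promote a pre-shifted index to a larger scale: x + (y<<1) at scale 1
// becomes y at scale 2.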
func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		x := v_1.Args[0]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		x := v_0.Args[0]
		y := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
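// A sign-extension of a load that has no other uses is folded into the
// load itself (MOVBQSXload). The replacement load is built in the
// original load's block (the @x.Block form in the rule), where its
// memory argument is available; the extension then becomes a plain Copy
// of the new load.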
func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7f
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
	// match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX x:(MOVBQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
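// The first rule below is store-to-load forwarding: a byte load that
// reads back the byte just stored at the same address is replaced by a
// zero-extension of the stored value, bypassing memory. The remaining
// rules fold ADDQconst/ADDLconst, LEAQ/LEAL and LEAQ1 address arithmetic
// into the load's addressing mode, switching to the indexed form
// MOVBloadidx1 where a second register is involved.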
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
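// A one-byte-scaled indexed load treats its ptr and idx arguments
// symmetrically, so the ADDQconst-folding rule below appears once for
// each commutation of the two arguments.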
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
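// Besides the usual extension-elimination and address folds, the
// MOVBstore rules recognize a chain of single-byte stores that writes
// the bytes of one value w (w, w>>8, w>>16, ...) to descending
// addresses, and coalesce it into one wider store. Written in that order
// the value lands in memory byte-reversed, hence the ROLWconst [8]
// (a 16-bit byte swap), BSWAPL and BSWAPQ in the results.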
{sym1} (LEAQ [off2] {sym2} base) val mem) 4926 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4927 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 4928 for { 4929 off1 := v.AuxInt 4930 sym1 := v.Aux 4931 _ = v.Args[2] 4932 v_0 := v.Args[0] 4933 if v_0.Op != OpAMD64LEAQ { 4934 break 4935 } 4936 off2 := v_0.AuxInt 4937 sym2 := v_0.Aux 4938 base := v_0.Args[0] 4939 val := v.Args[1] 4940 mem := v.Args[2] 4941 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4942 break 4943 } 4944 v.reset(OpAMD64MOVBstore) 4945 v.AuxInt = off1 + off2 4946 v.Aux = mergeSym(sym1, sym2) 4947 v.AddArg(base) 4948 v.AddArg(val) 4949 v.AddArg(mem) 4950 return true 4951 } 4952 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 4953 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4954 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 4955 for { 4956 off1 := v.AuxInt 4957 sym1 := v.Aux 4958 _ = v.Args[2] 4959 v_0 := v.Args[0] 4960 if v_0.Op != OpAMD64LEAQ1 { 4961 break 4962 } 4963 off2 := v_0.AuxInt 4964 sym2 := v_0.Aux 4965 _ = v_0.Args[1] 4966 ptr := v_0.Args[0] 4967 idx := v_0.Args[1] 4968 val := v.Args[1] 4969 mem := v.Args[2] 4970 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4971 break 4972 } 4973 v.reset(OpAMD64MOVBstoreidx1) 4974 v.AuxInt = off1 + off2 4975 v.Aux = mergeSym(sym1, sym2) 4976 v.AddArg(ptr) 4977 v.AddArg(idx) 4978 v.AddArg(val) 4979 v.AddArg(mem) 4980 return true 4981 } 4982 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 4983 // cond: ptr.Op != OpSB 4984 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 4985 for { 4986 off := v.AuxInt 4987 sym := v.Aux 4988 _ = v.Args[2] 4989 v_0 := v.Args[0] 4990 if v_0.Op != OpAMD64ADDQ { 4991 break 4992 } 4993 _ = v_0.Args[1] 4994 ptr := v_0.Args[0] 4995 idx := v_0.Args[1] 4996 val := v.Args[1] 4997 mem := v.Args[2] 4998 if !(ptr.Op != OpSB) { 4999 break 5000 } 5001 v.reset(OpAMD64MOVBstoreidx1) 5002 v.AuxInt = off 5003 v.Aux = sym 5004 v.AddArg(ptr) 5005 v.AddArg(idx) 5006 v.AddArg(val) 5007 v.AddArg(mem) 5008 return true 5009 } 5010 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) 5011 // cond: x0.Uses == 1 && clobber(x0) 5012 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) 5013 for { 5014 i := v.AuxInt 5015 s := v.Aux 5016 _ = v.Args[2] 5017 p := v.Args[0] 5018 w := v.Args[1] 5019 x0 := v.Args[2] 5020 if x0.Op != OpAMD64MOVBstore { 5021 break 5022 } 5023 if x0.AuxInt != i-1 { 5024 break 5025 } 5026 if x0.Aux != s { 5027 break 5028 } 5029 _ = x0.Args[2] 5030 if p != x0.Args[0] { 5031 break 5032 } 5033 x0_1 := x0.Args[1] 5034 if x0_1.Op != OpAMD64SHRWconst { 5035 break 5036 } 5037 if x0_1.AuxInt != 8 { 5038 break 5039 } 5040 if w != x0_1.Args[0] { 5041 break 5042 } 5043 mem := x0.Args[2] 5044 if !(x0.Uses == 1 && clobber(x0)) { 5045 break 5046 } 5047 v.reset(OpAMD64MOVWstore) 5048 v.AuxInt = i - 1 5049 v.Aux = s 5050 v.AddArg(p) 5051 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 5052 v0.AuxInt = 8 5053 v0.AddArg(w) 5054 v.AddArg(v0) 5055 v.AddArg(mem) 5056 return true 5057 } 5058 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 5059 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 5060 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 5061 for { 5062 i := v.AuxInt 5063 s := v.Aux 5064 _ = v.Args[2] 5065 p := 
v.Args[0] 5066 w := v.Args[1] 5067 x2 := v.Args[2] 5068 if x2.Op != OpAMD64MOVBstore { 5069 break 5070 } 5071 if x2.AuxInt != i-1 { 5072 break 5073 } 5074 if x2.Aux != s { 5075 break 5076 } 5077 _ = x2.Args[2] 5078 if p != x2.Args[0] { 5079 break 5080 } 5081 x2_1 := x2.Args[1] 5082 if x2_1.Op != OpAMD64SHRLconst { 5083 break 5084 } 5085 if x2_1.AuxInt != 8 { 5086 break 5087 } 5088 if w != x2_1.Args[0] { 5089 break 5090 } 5091 x1 := x2.Args[2] 5092 if x1.Op != OpAMD64MOVBstore { 5093 break 5094 } 5095 if x1.AuxInt != i-2 { 5096 break 5097 } 5098 if x1.Aux != s { 5099 break 5100 } 5101 _ = x1.Args[2] 5102 if p != x1.Args[0] { 5103 break 5104 } 5105 x1_1 := x1.Args[1] 5106 if x1_1.Op != OpAMD64SHRLconst { 5107 break 5108 } 5109 if x1_1.AuxInt != 16 { 5110 break 5111 } 5112 if w != x1_1.Args[0] { 5113 break 5114 } 5115 x0 := x1.Args[2] 5116 if x0.Op != OpAMD64MOVBstore { 5117 break 5118 } 5119 if x0.AuxInt != i-3 { 5120 break 5121 } 5122 if x0.Aux != s { 5123 break 5124 } 5125 _ = x0.Args[2] 5126 if p != x0.Args[0] { 5127 break 5128 } 5129 x0_1 := x0.Args[1] 5130 if x0_1.Op != OpAMD64SHRLconst { 5131 break 5132 } 5133 if x0_1.AuxInt != 24 { 5134 break 5135 } 5136 if w != x0_1.Args[0] { 5137 break 5138 } 5139 mem := x0.Args[2] 5140 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 5141 break 5142 } 5143 v.reset(OpAMD64MOVLstore) 5144 v.AuxInt = i - 3 5145 v.Aux = s 5146 v.AddArg(p) 5147 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 5148 v0.AddArg(w) 5149 v.AddArg(v0) 5150 v.AddArg(mem) 5151 return true 5152 } 5153 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 5154 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 5155 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 5156 for { 5157 i := v.AuxInt 5158 s := v.Aux 5159 _ = v.Args[2] 5160 p := v.Args[0] 5161 w := v.Args[1] 5162 x6 := v.Args[2] 5163 if x6.Op != OpAMD64MOVBstore { 5164 break 5165 } 5166 if x6.AuxInt != i-1 { 5167 break 5168 } 5169 if x6.Aux != s { 5170 break 5171 } 5172 _ = x6.Args[2] 5173 if p != x6.Args[0] { 5174 break 5175 } 5176 x6_1 := x6.Args[1] 5177 if x6_1.Op != OpAMD64SHRQconst { 5178 break 5179 } 5180 if x6_1.AuxInt != 8 { 5181 break 5182 } 5183 if w != x6_1.Args[0] { 5184 break 5185 } 5186 x5 := x6.Args[2] 5187 if x5.Op != OpAMD64MOVBstore { 5188 break 5189 } 5190 if x5.AuxInt != i-2 { 5191 break 5192 } 5193 if x5.Aux != s { 5194 break 5195 } 5196 _ = x5.Args[2] 5197 if p != x5.Args[0] { 5198 break 5199 } 5200 x5_1 := x5.Args[1] 5201 if x5_1.Op != OpAMD64SHRQconst { 5202 break 5203 } 5204 if x5_1.AuxInt != 16 { 5205 break 5206 } 5207 if w != x5_1.Args[0] { 5208 break 5209 } 5210 x4 := x5.Args[2] 5211 if x4.Op != OpAMD64MOVBstore { 5212 break 5213 } 5214 if x4.AuxInt != i-3 { 5215 break 5216 } 5217 if x4.Aux != s { 5218 break 5219 } 5220 _ = x4.Args[2] 5221 if p != x4.Args[0] { 5222 break 5223 } 5224 x4_1 := x4.Args[1] 5225 if x4_1.Op != OpAMD64SHRQconst { 5226 break 5227 } 5228 if x4_1.AuxInt != 24 { 5229 break 5230 } 5231 if w != x4_1.Args[0] { 5232 break 5233 } 5234 x3 := x4.Args[2] 
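		// Editorial note, not generator output: the matcher is walking a chain of
		// single-use byte stores x6 -> x5 -> ... -> x0, checking that each one writes
		// the next-higher byte of w (SHRQconst 8, 16, ..., 56) at the next-lower
		// offset. A sketch of the hand-rolled big-endian store this recognizes,
		// assuming p is a byte slice and w a uint64:
		//
		//	p[7] = byte(w)
		//	p[6] = byte(w >> 8)
		//	// ...
		//	p[0] = byte(w >> 56)
		//
		// The eight MOVBstores collapse into one MOVQstore of BSWAPQ(w) at the
		// lowest offset.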
		if x3.Op != OpAMD64MOVBstore {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[2]
		if p != x3.Args[0] {
			break
		}
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst {
			break
		}
		if x3_1.AuxInt != 32 {
			break
		}
		if w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst {
			break
		}
		if x2_1.AuxInt != 40 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst {
			break
		}
		if x1_1.AuxInt != 48 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst {
			break
		}
		if x0_1.AuxInt != 56 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 8 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
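		// Editorial note, not generator output: x is a packed ValAndOff, and the
		// LEAQ1 displacement `off` is folded into its offset half below. A small
		// sketch of the AuxInt arithmetic, with made-up values:
		//
		//	vo := makeValAndOff(7, 16) // store the constant 7 at offset 16
		//	_ = ValAndOff(vo).add(8)   // folding off=8: still value 7, now at offset 24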
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x0 := v.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRWconst {
			break
		}
		if x0_2.AuxInt != 8 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x2 := v.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRLconst {
			break
		}
		if x2_2.AuxInt != 8 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRLconst {
			break
		}
		if x1_2.AuxInt != 16 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRLconst {
			break
		}
		if x0_2.AuxInt != 24 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x6 := v.Args[3]
		if x6.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[3]
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		x6_2 := x6.Args[2]
		if x6_2.Op != OpAMD64SHRQconst {
			break
		}
		if x6_2.AuxInt != 8 {
			break
		}
		if w != x6_2.Args[0] {
			break
		}
		x5 := x6.Args[3]
		if x5.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[3]
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		x5_2 := x5.Args[2]
		if x5_2.Op != OpAMD64SHRQconst {
			break
		}
		if x5_2.AuxInt != 16 {
			break
		}
		if w != x5_2.Args[0] {
			break
		}
		x4 := x5.Args[3]
		if x4.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[3]
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		x4_2 := x4.Args[2]
		if x4_2.Op != OpAMD64SHRQconst {
			break
		}
		if x4_2.AuxInt != 24 {
			break
		}
		if w != x4_2.Args[0] {
			break
		}
		x3 := x4.Args[3]
		if x3.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[3]
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		x3_2 := x3.Args[2]
		if x3_2.Op != OpAMD64SHRQconst {
			break
		}
		if x3_2.AuxInt != 32 {
			break
		}
		if w != x3_2.Args[0] {
			break
		}
		x2 := x3.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRQconst {
			break
		}
		if x2_2.AuxInt != 40 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRQconst {
			break
		}
		if x1_2.AuxInt != 48 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRQconst {
			break
		}
		if x0_2.AuxInt != 56 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 8 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: c & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fffffff
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX x:(MOVLQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX x:(MOVWQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool {
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx4 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVLQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVWQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVBQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
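		// Editorial note, not generator output: only the addressing arithmetic
		// changes in this rewrite; mem is re-attached last and threads through
		// unchanged, so the atomic load keeps its place in the memory chain.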
		v.AddArg(mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
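		// Editorial note, not generator output: the `_ = v.Args[2]` just above is
		// presumably a generated bounds hint, asserting up front that v has three
		// arguments so the indexed v.Args accesses below cannot panic mid-match;
		// the same idiom precedes every multi-argument pattern in this file.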
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)]
{sym} ptr idx mem) 7813 for { 7814 x := v.AuxInt 7815 sym := v.Aux 7816 _ = v.Args[2] 7817 ptr := v.Args[0] 7818 v_1 := v.Args[1] 7819 if v_1.Op != OpAMD64ADDQconst { 7820 break 7821 } 7822 c := v_1.AuxInt 7823 idx := v_1.Args[0] 7824 mem := v.Args[2] 7825 v.reset(OpAMD64MOVLstoreconstidx4) 7826 v.AuxInt = ValAndOff(x).add(4 * c) 7827 v.Aux = sym 7828 v.AddArg(ptr) 7829 v.AddArg(idx) 7830 v.AddArg(mem) 7831 return true 7832 } 7833 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 7834 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 7835 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 7836 for { 7837 c := v.AuxInt 7838 s := v.Aux 7839 _ = v.Args[2] 7840 p := v.Args[0] 7841 i := v.Args[1] 7842 x := v.Args[2] 7843 if x.Op != OpAMD64MOVLstoreconstidx4 { 7844 break 7845 } 7846 a := x.AuxInt 7847 if x.Aux != s { 7848 break 7849 } 7850 _ = x.Args[2] 7851 if p != x.Args[0] { 7852 break 7853 } 7854 if i != x.Args[1] { 7855 break 7856 } 7857 mem := x.Args[2] 7858 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 7859 break 7860 } 7861 v.reset(OpAMD64MOVQstoreidx1) 7862 v.AuxInt = ValAndOff(a).Off() 7863 v.Aux = s 7864 v.AddArg(p) 7865 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 7866 v0.AuxInt = 2 7867 v0.AddArg(i) 7868 v.AddArg(v0) 7869 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 7870 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 7871 v.AddArg(v1) 7872 v.AddArg(mem) 7873 return true 7874 } 7875 return false 7876 } 7877 func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { 7878 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 7879 // cond: 7880 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 7881 for { 7882 c := v.AuxInt 7883 sym := v.Aux 7884 _ = v.Args[3] 7885 ptr := v.Args[0] 7886 v_1 := v.Args[1] 7887 if v_1.Op != OpAMD64SHLQconst { 7888 break 7889 } 7890 if v_1.AuxInt != 2 { 7891 break 7892 } 7893 idx := v_1.Args[0] 7894 val := v.Args[2] 7895 mem := v.Args[3] 7896 v.reset(OpAMD64MOVLstoreidx4) 7897 v.AuxInt = c 7898 v.Aux = sym 7899 v.AddArg(ptr) 7900 v.AddArg(idx) 7901 v.AddArg(val) 7902 v.AddArg(mem) 7903 return true 7904 } 7905 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 7906 // cond: 7907 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 7908 for { 7909 c := v.AuxInt 7910 sym := v.Aux 7911 _ = v.Args[3] 7912 v_0 := v.Args[0] 7913 if v_0.Op != OpAMD64ADDQconst { 7914 break 7915 } 7916 d := v_0.AuxInt 7917 ptr := v_0.Args[0] 7918 idx := v.Args[1] 7919 val := v.Args[2] 7920 mem := v.Args[3] 7921 v.reset(OpAMD64MOVLstoreidx1) 7922 v.AuxInt = c + d 7923 v.Aux = sym 7924 v.AddArg(ptr) 7925 v.AddArg(idx) 7926 v.AddArg(val) 7927 v.AddArg(mem) 7928 return true 7929 } 7930 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 7931 // cond: 7932 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 7933 for { 7934 c := v.AuxInt 7935 sym := v.Aux 7936 _ = v.Args[3] 7937 ptr := v.Args[0] 7938 v_1 := v.Args[1] 7939 if v_1.Op != OpAMD64ADDQconst { 7940 break 7941 } 7942 d := v_1.AuxInt 7943 idx := v_1.Args[0] 7944 val := v.Args[2] 7945 mem := v.Args[3] 7946 v.reset(OpAMD64MOVLstoreidx1) 7947 v.AuxInt = c + d 7948 v.Aux = sym 7949 v.AddArg(ptr) 7950 v.AddArg(idx) 7951 v.AddArg(val) 7952 v.AddArg(mem) 7953 return true 7954 } 7955 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) 
x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 7956 // cond: x.Uses == 1 && clobber(x) 7957 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 7958 for { 7959 i := v.AuxInt 7960 s := v.Aux 7961 _ = v.Args[3] 7962 p := v.Args[0] 7963 idx := v.Args[1] 7964 v_2 := v.Args[2] 7965 if v_2.Op != OpAMD64SHRQconst { 7966 break 7967 } 7968 if v_2.AuxInt != 32 { 7969 break 7970 } 7971 w := v_2.Args[0] 7972 x := v.Args[3] 7973 if x.Op != OpAMD64MOVLstoreidx1 { 7974 break 7975 } 7976 if x.AuxInt != i-4 { 7977 break 7978 } 7979 if x.Aux != s { 7980 break 7981 } 7982 _ = x.Args[3] 7983 if p != x.Args[0] { 7984 break 7985 } 7986 if idx != x.Args[1] { 7987 break 7988 } 7989 if w != x.Args[2] { 7990 break 7991 } 7992 mem := x.Args[3] 7993 if !(x.Uses == 1 && clobber(x)) { 7994 break 7995 } 7996 v.reset(OpAMD64MOVQstoreidx1) 7997 v.AuxInt = i - 4 7998 v.Aux = s 7999 v.AddArg(p) 8000 v.AddArg(idx) 8001 v.AddArg(w) 8002 v.AddArg(mem) 8003 return true 8004 } 8005 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 8006 // cond: x.Uses == 1 && clobber(x) 8007 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 8008 for { 8009 i := v.AuxInt 8010 s := v.Aux 8011 _ = v.Args[3] 8012 p := v.Args[0] 8013 idx := v.Args[1] 8014 v_2 := v.Args[2] 8015 if v_2.Op != OpAMD64SHRQconst { 8016 break 8017 } 8018 j := v_2.AuxInt 8019 w := v_2.Args[0] 8020 x := v.Args[3] 8021 if x.Op != OpAMD64MOVLstoreidx1 { 8022 break 8023 } 8024 if x.AuxInt != i-4 { 8025 break 8026 } 8027 if x.Aux != s { 8028 break 8029 } 8030 _ = x.Args[3] 8031 if p != x.Args[0] { 8032 break 8033 } 8034 if idx != x.Args[1] { 8035 break 8036 } 8037 w0 := x.Args[2] 8038 if w0.Op != OpAMD64SHRQconst { 8039 break 8040 } 8041 if w0.AuxInt != j-32 { 8042 break 8043 } 8044 if w != w0.Args[0] { 8045 break 8046 } 8047 mem := x.Args[3] 8048 if !(x.Uses == 1 && clobber(x)) { 8049 break 8050 } 8051 v.reset(OpAMD64MOVQstoreidx1) 8052 v.AuxInt = i - 4 8053 v.Aux = s 8054 v.AddArg(p) 8055 v.AddArg(idx) 8056 v.AddArg(w0) 8057 v.AddArg(mem) 8058 return true 8059 } 8060 return false 8061 } 8062 func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { 8063 b := v.Block 8064 _ = b 8065 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 8066 // cond: 8067 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 8068 for { 8069 c := v.AuxInt 8070 sym := v.Aux 8071 _ = v.Args[3] 8072 v_0 := v.Args[0] 8073 if v_0.Op != OpAMD64ADDQconst { 8074 break 8075 } 8076 d := v_0.AuxInt 8077 ptr := v_0.Args[0] 8078 idx := v.Args[1] 8079 val := v.Args[2] 8080 mem := v.Args[3] 8081 v.reset(OpAMD64MOVLstoreidx4) 8082 v.AuxInt = c + d 8083 v.Aux = sym 8084 v.AddArg(ptr) 8085 v.AddArg(idx) 8086 v.AddArg(val) 8087 v.AddArg(mem) 8088 return true 8089 } 8090 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 8091 // cond: 8092 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 8093 for { 8094 c := v.AuxInt 8095 sym := v.Aux 8096 _ = v.Args[3] 8097 ptr := v.Args[0] 8098 v_1 := v.Args[1] 8099 if v_1.Op != OpAMD64ADDQconst { 8100 break 8101 } 8102 d := v_1.AuxInt 8103 idx := v_1.Args[0] 8104 val := v.Args[2] 8105 mem := v.Args[3] 8106 v.reset(OpAMD64MOVLstoreidx4) 8107 v.AuxInt = c + 4*d 8108 v.Aux = sym 8109 v.AddArg(ptr) 8110 v.AddArg(idx) 8111 v.AddArg(val) 8112 v.AddArg(mem) 8113 return true 8114 } 8115 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 8116 // cond: x.Uses == 1 && clobber(x) 8117 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst 
<idx.Type> [2] idx) w mem) 8118 for { 8119 i := v.AuxInt 8120 s := v.Aux 8121 _ = v.Args[3] 8122 p := v.Args[0] 8123 idx := v.Args[1] 8124 v_2 := v.Args[2] 8125 if v_2.Op != OpAMD64SHRQconst { 8126 break 8127 } 8128 if v_2.AuxInt != 32 { 8129 break 8130 } 8131 w := v_2.Args[0] 8132 x := v.Args[3] 8133 if x.Op != OpAMD64MOVLstoreidx4 { 8134 break 8135 } 8136 if x.AuxInt != i-4 { 8137 break 8138 } 8139 if x.Aux != s { 8140 break 8141 } 8142 _ = x.Args[3] 8143 if p != x.Args[0] { 8144 break 8145 } 8146 if idx != x.Args[1] { 8147 break 8148 } 8149 if w != x.Args[2] { 8150 break 8151 } 8152 mem := x.Args[3] 8153 if !(x.Uses == 1 && clobber(x)) { 8154 break 8155 } 8156 v.reset(OpAMD64MOVQstoreidx1) 8157 v.AuxInt = i - 4 8158 v.Aux = s 8159 v.AddArg(p) 8160 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 8161 v0.AuxInt = 2 8162 v0.AddArg(idx) 8163 v.AddArg(v0) 8164 v.AddArg(w) 8165 v.AddArg(mem) 8166 return true 8167 } 8168 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 8169 // cond: x.Uses == 1 && clobber(x) 8170 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 8171 for { 8172 i := v.AuxInt 8173 s := v.Aux 8174 _ = v.Args[3] 8175 p := v.Args[0] 8176 idx := v.Args[1] 8177 v_2 := v.Args[2] 8178 if v_2.Op != OpAMD64SHRQconst { 8179 break 8180 } 8181 j := v_2.AuxInt 8182 w := v_2.Args[0] 8183 x := v.Args[3] 8184 if x.Op != OpAMD64MOVLstoreidx4 { 8185 break 8186 } 8187 if x.AuxInt != i-4 { 8188 break 8189 } 8190 if x.Aux != s { 8191 break 8192 } 8193 _ = x.Args[3] 8194 if p != x.Args[0] { 8195 break 8196 } 8197 if idx != x.Args[1] { 8198 break 8199 } 8200 w0 := x.Args[2] 8201 if w0.Op != OpAMD64SHRQconst { 8202 break 8203 } 8204 if w0.AuxInt != j-32 { 8205 break 8206 } 8207 if w != w0.Args[0] { 8208 break 8209 } 8210 mem := x.Args[3] 8211 if !(x.Uses == 1 && clobber(x)) { 8212 break 8213 } 8214 v.reset(OpAMD64MOVQstoreidx1) 8215 v.AuxInt = i - 4 8216 v.Aux = s 8217 v.AddArg(p) 8218 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 8219 v0.AuxInt = 2 8220 v0.AddArg(idx) 8221 v.AddArg(v0) 8222 v.AddArg(w0) 8223 v.AddArg(mem) 8224 return true 8225 } 8226 return false 8227 } 8228 func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { 8229 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 8230 // cond: is32Bit(off1+off2) 8231 // result: (MOVOload [off1+off2] {sym} ptr mem) 8232 for { 8233 off1 := v.AuxInt 8234 sym := v.Aux 8235 _ = v.Args[1] 8236 v_0 := v.Args[0] 8237 if v_0.Op != OpAMD64ADDQconst { 8238 break 8239 } 8240 off2 := v_0.AuxInt 8241 ptr := v_0.Args[0] 8242 mem := v.Args[1] 8243 if !(is32Bit(off1 + off2)) { 8244 break 8245 } 8246 v.reset(OpAMD64MOVOload) 8247 v.AuxInt = off1 + off2 8248 v.Aux = sym 8249 v.AddArg(ptr) 8250 v.AddArg(mem) 8251 return true 8252 } 8253 // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8254 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8255 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8256 for { 8257 off1 := v.AuxInt 8258 sym1 := v.Aux 8259 _ = v.Args[1] 8260 v_0 := v.Args[0] 8261 if v_0.Op != OpAMD64LEAQ { 8262 break 8263 } 8264 off2 := v_0.AuxInt 8265 sym2 := v_0.Aux 8266 base := v_0.Args[0] 8267 mem := v.Args[1] 8268 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8269 break 8270 } 8271 v.reset(OpAMD64MOVOload) 8272 v.AuxInt = off1 + off2 8273 v.Aux = mergeSym(sym1, sym2) 8274 v.AddArg(base) 8275 v.AddArg(mem) 8276 return true 8277 } 8278 return false 8279 } 8280 func 
rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool { 8281 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8282 // cond: is32Bit(off1+off2) 8283 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 8284 for { 8285 off1 := v.AuxInt 8286 sym := v.Aux 8287 _ = v.Args[2] 8288 v_0 := v.Args[0] 8289 if v_0.Op != OpAMD64ADDQconst { 8290 break 8291 } 8292 off2 := v_0.AuxInt 8293 ptr := v_0.Args[0] 8294 val := v.Args[1] 8295 mem := v.Args[2] 8296 if !(is32Bit(off1 + off2)) { 8297 break 8298 } 8299 v.reset(OpAMD64MOVOstore) 8300 v.AuxInt = off1 + off2 8301 v.Aux = sym 8302 v.AddArg(ptr) 8303 v.AddArg(val) 8304 v.AddArg(mem) 8305 return true 8306 } 8307 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8308 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8309 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8310 for { 8311 off1 := v.AuxInt 8312 sym1 := v.Aux 8313 _ = v.Args[2] 8314 v_0 := v.Args[0] 8315 if v_0.Op != OpAMD64LEAQ { 8316 break 8317 } 8318 off2 := v_0.AuxInt 8319 sym2 := v_0.Aux 8320 base := v_0.Args[0] 8321 val := v.Args[1] 8322 mem := v.Args[2] 8323 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8324 break 8325 } 8326 v.reset(OpAMD64MOVOstore) 8327 v.AuxInt = off1 + off2 8328 v.Aux = mergeSym(sym1, sym2) 8329 v.AddArg(base) 8330 v.AddArg(val) 8331 v.AddArg(mem) 8332 return true 8333 } 8334 return false 8335 } 8336 func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { 8337 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 8338 // cond: is32Bit(off1+off2) 8339 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 8340 for { 8341 off1 := v.AuxInt 8342 sym := v.Aux 8343 _ = v.Args[1] 8344 v_0 := v.Args[0] 8345 if v_0.Op != OpAMD64ADDQconst { 8346 break 8347 } 8348 off2 := v_0.AuxInt 8349 ptr := v_0.Args[0] 8350 mem := v.Args[1] 8351 if !(is32Bit(off1 + off2)) { 8352 break 8353 } 8354 v.reset(OpAMD64MOVQatomicload) 8355 v.AuxInt = off1 + off2 8356 v.Aux = sym 8357 v.AddArg(ptr) 8358 v.AddArg(mem) 8359 return true 8360 } 8361 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 8362 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8363 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 8364 for { 8365 off1 := v.AuxInt 8366 sym1 := v.Aux 8367 _ = v.Args[1] 8368 v_0 := v.Args[0] 8369 if v_0.Op != OpAMD64LEAQ { 8370 break 8371 } 8372 off2 := v_0.AuxInt 8373 sym2 := v_0.Aux 8374 ptr := v_0.Args[0] 8375 mem := v.Args[1] 8376 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8377 break 8378 } 8379 v.reset(OpAMD64MOVQatomicload) 8380 v.AuxInt = off1 + off2 8381 v.Aux = mergeSym(sym1, sym2) 8382 v.AddArg(ptr) 8383 v.AddArg(mem) 8384 return true 8385 } 8386 return false 8387 } 8388 func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { 8389 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 8390 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 8391 // result: x 8392 for { 8393 off := v.AuxInt 8394 sym := v.Aux 8395 _ = v.Args[1] 8396 ptr := v.Args[0] 8397 v_1 := v.Args[1] 8398 if v_1.Op != OpAMD64MOVQstore { 8399 break 8400 } 8401 off2 := v_1.AuxInt 8402 sym2 := v_1.Aux 8403 _ = v_1.Args[2] 8404 ptr2 := v_1.Args[0] 8405 x := v_1.Args[1] 8406 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 8407 break 8408 } 8409 v.reset(OpCopy) 8410 v.Type = x.Type 8411 v.AddArg(x) 8412 return true 8413 } 8414 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 8415 // cond: is32Bit(off1+off2) 8416 // result: (MOVQload 
[off1+off2] {sym} ptr mem) 8417 for { 8418 off1 := v.AuxInt 8419 sym := v.Aux 8420 _ = v.Args[1] 8421 v_0 := v.Args[0] 8422 if v_0.Op != OpAMD64ADDQconst { 8423 break 8424 } 8425 off2 := v_0.AuxInt 8426 ptr := v_0.Args[0] 8427 mem := v.Args[1] 8428 if !(is32Bit(off1 + off2)) { 8429 break 8430 } 8431 v.reset(OpAMD64MOVQload) 8432 v.AuxInt = off1 + off2 8433 v.Aux = sym 8434 v.AddArg(ptr) 8435 v.AddArg(mem) 8436 return true 8437 } 8438 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 8439 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8440 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8441 for { 8442 off1 := v.AuxInt 8443 sym1 := v.Aux 8444 _ = v.Args[1] 8445 v_0 := v.Args[0] 8446 if v_0.Op != OpAMD64LEAQ { 8447 break 8448 } 8449 off2 := v_0.AuxInt 8450 sym2 := v_0.Aux 8451 base := v_0.Args[0] 8452 mem := v.Args[1] 8453 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8454 break 8455 } 8456 v.reset(OpAMD64MOVQload) 8457 v.AuxInt = off1 + off2 8458 v.Aux = mergeSym(sym1, sym2) 8459 v.AddArg(base) 8460 v.AddArg(mem) 8461 return true 8462 } 8463 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 8464 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8465 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8466 for { 8467 off1 := v.AuxInt 8468 sym1 := v.Aux 8469 _ = v.Args[1] 8470 v_0 := v.Args[0] 8471 if v_0.Op != OpAMD64LEAQ1 { 8472 break 8473 } 8474 off2 := v_0.AuxInt 8475 sym2 := v_0.Aux 8476 _ = v_0.Args[1] 8477 ptr := v_0.Args[0] 8478 idx := v_0.Args[1] 8479 mem := v.Args[1] 8480 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8481 break 8482 } 8483 v.reset(OpAMD64MOVQloadidx1) 8484 v.AuxInt = off1 + off2 8485 v.Aux = mergeSym(sym1, sym2) 8486 v.AddArg(ptr) 8487 v.AddArg(idx) 8488 v.AddArg(mem) 8489 return true 8490 } 8491 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 8492 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8493 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 8494 for { 8495 off1 := v.AuxInt 8496 sym1 := v.Aux 8497 _ = v.Args[1] 8498 v_0 := v.Args[0] 8499 if v_0.Op != OpAMD64LEAQ8 { 8500 break 8501 } 8502 off2 := v_0.AuxInt 8503 sym2 := v_0.Aux 8504 _ = v_0.Args[1] 8505 ptr := v_0.Args[0] 8506 idx := v_0.Args[1] 8507 mem := v.Args[1] 8508 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8509 break 8510 } 8511 v.reset(OpAMD64MOVQloadidx8) 8512 v.AuxInt = off1 + off2 8513 v.Aux = mergeSym(sym1, sym2) 8514 v.AddArg(ptr) 8515 v.AddArg(idx) 8516 v.AddArg(mem) 8517 return true 8518 } 8519 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 8520 // cond: ptr.Op != OpSB 8521 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 8522 for { 8523 off := v.AuxInt 8524 sym := v.Aux 8525 _ = v.Args[1] 8526 v_0 := v.Args[0] 8527 if v_0.Op != OpAMD64ADDQ { 8528 break 8529 } 8530 _ = v_0.Args[1] 8531 ptr := v_0.Args[0] 8532 idx := v_0.Args[1] 8533 mem := v.Args[1] 8534 if !(ptr.Op != OpSB) { 8535 break 8536 } 8537 v.reset(OpAMD64MOVQloadidx1) 8538 v.AuxInt = off 8539 v.Aux = sym 8540 v.AddArg(ptr) 8541 v.AddArg(idx) 8542 v.AddArg(mem) 8543 return true 8544 } 8545 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 8546 // cond: canMergeSym(sym1, sym2) 8547 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 8548 for { 8549 off1 := v.AuxInt 8550 sym1 := v.Aux 8551 _ = v.Args[1] 8552 v_0 := v.Args[0] 8553 if v_0.Op != OpAMD64LEAL { 8554 break 8555 } 8556 off2 := v_0.AuxInt 8557 sym2 := v_0.Aux 8558 base := v_0.Args[0] 
8559 mem := v.Args[1] 8560 if !(canMergeSym(sym1, sym2)) { 8561 break 8562 } 8563 v.reset(OpAMD64MOVQload) 8564 v.AuxInt = off1 + off2 8565 v.Aux = mergeSym(sym1, sym2) 8566 v.AddArg(base) 8567 v.AddArg(mem) 8568 return true 8569 } 8570 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 8571 // cond: is32Bit(off1+off2) 8572 // result: (MOVQload [off1+off2] {sym} ptr mem) 8573 for { 8574 off1 := v.AuxInt 8575 sym := v.Aux 8576 _ = v.Args[1] 8577 v_0 := v.Args[0] 8578 if v_0.Op != OpAMD64ADDLconst { 8579 break 8580 } 8581 off2 := v_0.AuxInt 8582 ptr := v_0.Args[0] 8583 mem := v.Args[1] 8584 if !(is32Bit(off1 + off2)) { 8585 break 8586 } 8587 v.reset(OpAMD64MOVQload) 8588 v.AuxInt = off1 + off2 8589 v.Aux = sym 8590 v.AddArg(ptr) 8591 v.AddArg(mem) 8592 return true 8593 } 8594 return false 8595 } 8596 func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { 8597 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 8598 // cond: 8599 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 8600 for { 8601 c := v.AuxInt 8602 sym := v.Aux 8603 _ = v.Args[2] 8604 ptr := v.Args[0] 8605 v_1 := v.Args[1] 8606 if v_1.Op != OpAMD64SHLQconst { 8607 break 8608 } 8609 if v_1.AuxInt != 3 { 8610 break 8611 } 8612 idx := v_1.Args[0] 8613 mem := v.Args[2] 8614 v.reset(OpAMD64MOVQloadidx8) 8615 v.AuxInt = c 8616 v.Aux = sym 8617 v.AddArg(ptr) 8618 v.AddArg(idx) 8619 v.AddArg(mem) 8620 return true 8621 } 8622 // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 8623 // cond: 8624 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 8625 for { 8626 c := v.AuxInt 8627 sym := v.Aux 8628 _ = v.Args[2] 8629 v_0 := v.Args[0] 8630 if v_0.Op != OpAMD64SHLQconst { 8631 break 8632 } 8633 if v_0.AuxInt != 3 { 8634 break 8635 } 8636 idx := v_0.Args[0] 8637 ptr := v.Args[1] 8638 mem := v.Args[2] 8639 v.reset(OpAMD64MOVQloadidx8) 8640 v.AuxInt = c 8641 v.Aux = sym 8642 v.AddArg(ptr) 8643 v.AddArg(idx) 8644 v.AddArg(mem) 8645 return true 8646 } 8647 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 8648 // cond: 8649 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8650 for { 8651 c := v.AuxInt 8652 sym := v.Aux 8653 _ = v.Args[2] 8654 v_0 := v.Args[0] 8655 if v_0.Op != OpAMD64ADDQconst { 8656 break 8657 } 8658 d := v_0.AuxInt 8659 ptr := v_0.Args[0] 8660 idx := v.Args[1] 8661 mem := v.Args[2] 8662 v.reset(OpAMD64MOVQloadidx1) 8663 v.AuxInt = c + d 8664 v.Aux = sym 8665 v.AddArg(ptr) 8666 v.AddArg(idx) 8667 v.AddArg(mem) 8668 return true 8669 } 8670 // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 8671 // cond: 8672 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8673 for { 8674 c := v.AuxInt 8675 sym := v.Aux 8676 _ = v.Args[2] 8677 idx := v.Args[0] 8678 v_1 := v.Args[1] 8679 if v_1.Op != OpAMD64ADDQconst { 8680 break 8681 } 8682 d := v_1.AuxInt 8683 ptr := v_1.Args[0] 8684 mem := v.Args[2] 8685 v.reset(OpAMD64MOVQloadidx1) 8686 v.AuxInt = c + d 8687 v.Aux = sym 8688 v.AddArg(ptr) 8689 v.AddArg(idx) 8690 v.AddArg(mem) 8691 return true 8692 } 8693 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 8694 // cond: 8695 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8696 for { 8697 c := v.AuxInt 8698 sym := v.Aux 8699 _ = v.Args[2] 8700 ptr := v.Args[0] 8701 v_1 := v.Args[1] 8702 if v_1.Op != OpAMD64ADDQconst { 8703 break 8704 } 8705 d := v_1.AuxInt 8706 idx := v_1.Args[0] 8707 mem := v.Args[2] 8708 v.reset(OpAMD64MOVQloadidx1) 8709 v.AuxInt = c + d 8710 v.Aux = sym 8711 v.AddArg(ptr) 8712 v.AddArg(idx) 8713 v.AddArg(mem) 8714 return true 8715 } 8716 // match: 
(MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 8717 // cond: 8718 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 8719 for { 8720 c := v.AuxInt 8721 sym := v.Aux 8722 _ = v.Args[2] 8723 v_0 := v.Args[0] 8724 if v_0.Op != OpAMD64ADDQconst { 8725 break 8726 } 8727 d := v_0.AuxInt 8728 idx := v_0.Args[0] 8729 ptr := v.Args[1] 8730 mem := v.Args[2] 8731 v.reset(OpAMD64MOVQloadidx1) 8732 v.AuxInt = c + d 8733 v.Aux = sym 8734 v.AddArg(ptr) 8735 v.AddArg(idx) 8736 v.AddArg(mem) 8737 return true 8738 } 8739 return false 8740 } 8741 func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { 8742 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 8743 // cond: 8744 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 8745 for { 8746 c := v.AuxInt 8747 sym := v.Aux 8748 _ = v.Args[2] 8749 v_0 := v.Args[0] 8750 if v_0.Op != OpAMD64ADDQconst { 8751 break 8752 } 8753 d := v_0.AuxInt 8754 ptr := v_0.Args[0] 8755 idx := v.Args[1] 8756 mem := v.Args[2] 8757 v.reset(OpAMD64MOVQloadidx8) 8758 v.AuxInt = c + d 8759 v.Aux = sym 8760 v.AddArg(ptr) 8761 v.AddArg(idx) 8762 v.AddArg(mem) 8763 return true 8764 } 8765 // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 8766 // cond: 8767 // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) 8768 for { 8769 c := v.AuxInt 8770 sym := v.Aux 8771 _ = v.Args[2] 8772 ptr := v.Args[0] 8773 v_1 := v.Args[1] 8774 if v_1.Op != OpAMD64ADDQconst { 8775 break 8776 } 8777 d := v_1.AuxInt 8778 idx := v_1.Args[0] 8779 mem := v.Args[2] 8780 v.reset(OpAMD64MOVQloadidx8) 8781 v.AuxInt = c + 8*d 8782 v.Aux = sym 8783 v.AddArg(ptr) 8784 v.AddArg(idx) 8785 v.AddArg(mem) 8786 return true 8787 } 8788 return false 8789 } 8790 func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { 8791 // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8792 // cond: is32Bit(off1+off2) 8793 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 8794 for { 8795 off1 := v.AuxInt 8796 sym := v.Aux 8797 _ = v.Args[2] 8798 v_0 := v.Args[0] 8799 if v_0.Op != OpAMD64ADDQconst { 8800 break 8801 } 8802 off2 := v_0.AuxInt 8803 ptr := v_0.Args[0] 8804 val := v.Args[1] 8805 mem := v.Args[2] 8806 if !(is32Bit(off1 + off2)) { 8807 break 8808 } 8809 v.reset(OpAMD64MOVQstore) 8810 v.AuxInt = off1 + off2 8811 v.Aux = sym 8812 v.AddArg(ptr) 8813 v.AddArg(val) 8814 v.AddArg(mem) 8815 return true 8816 } 8817 // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) 8818 // cond: validValAndOff(c,off) 8819 // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) 8820 for { 8821 off := v.AuxInt 8822 sym := v.Aux 8823 _ = v.Args[2] 8824 ptr := v.Args[0] 8825 v_1 := v.Args[1] 8826 if v_1.Op != OpAMD64MOVQconst { 8827 break 8828 } 8829 c := v_1.AuxInt 8830 mem := v.Args[2] 8831 if !(validValAndOff(c, off)) { 8832 break 8833 } 8834 v.reset(OpAMD64MOVQstoreconst) 8835 v.AuxInt = makeValAndOff(c, off) 8836 v.Aux = sym 8837 v.AddArg(ptr) 8838 v.AddArg(mem) 8839 return true 8840 } 8841 // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8842 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8843 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8844 for { 8845 off1 := v.AuxInt 8846 sym1 := v.Aux 8847 _ = v.Args[2] 8848 v_0 := v.Args[0] 8849 if v_0.Op != OpAMD64LEAQ { 8850 break 8851 } 8852 off2 := v_0.AuxInt 8853 sym2 := v_0.Aux 8854 base := v_0.Args[0] 8855 val := v.Args[1] 8856 mem := v.Args[2] 8857 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8858 break 8859 } 8860 v.reset(OpAMD64MOVQstore) 8861 v.AuxInt = off1 + off2 8862 v.Aux = 
mergeSym(sym1, sym2) 8863 v.AddArg(base) 8864 v.AddArg(val) 8865 v.AddArg(mem) 8866 return true 8867 } 8868 // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8869 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8870 // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8871 for { 8872 off1 := v.AuxInt 8873 sym1 := v.Aux 8874 _ = v.Args[2] 8875 v_0 := v.Args[0] 8876 if v_0.Op != OpAMD64LEAQ1 { 8877 break 8878 } 8879 off2 := v_0.AuxInt 8880 sym2 := v_0.Aux 8881 _ = v_0.Args[1] 8882 ptr := v_0.Args[0] 8883 idx := v_0.Args[1] 8884 val := v.Args[1] 8885 mem := v.Args[2] 8886 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8887 break 8888 } 8889 v.reset(OpAMD64MOVQstoreidx1) 8890 v.AuxInt = off1 + off2 8891 v.Aux = mergeSym(sym1, sym2) 8892 v.AddArg(ptr) 8893 v.AddArg(idx) 8894 v.AddArg(val) 8895 v.AddArg(mem) 8896 return true 8897 } 8898 // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 8899 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8900 // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8901 for { 8902 off1 := v.AuxInt 8903 sym1 := v.Aux 8904 _ = v.Args[2] 8905 v_0 := v.Args[0] 8906 if v_0.Op != OpAMD64LEAQ8 { 8907 break 8908 } 8909 off2 := v_0.AuxInt 8910 sym2 := v_0.Aux 8911 _ = v_0.Args[1] 8912 ptr := v_0.Args[0] 8913 idx := v_0.Args[1] 8914 val := v.Args[1] 8915 mem := v.Args[2] 8916 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8917 break 8918 } 8919 v.reset(OpAMD64MOVQstoreidx8) 8920 v.AuxInt = off1 + off2 8921 v.Aux = mergeSym(sym1, sym2) 8922 v.AddArg(ptr) 8923 v.AddArg(idx) 8924 v.AddArg(val) 8925 v.AddArg(mem) 8926 return true 8927 } 8928 // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) 8929 // cond: ptr.Op != OpSB 8930 // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) 8931 for { 8932 off := v.AuxInt 8933 sym := v.Aux 8934 _ = v.Args[2] 8935 v_0 := v.Args[0] 8936 if v_0.Op != OpAMD64ADDQ { 8937 break 8938 } 8939 _ = v_0.Args[1] 8940 ptr := v_0.Args[0] 8941 idx := v_0.Args[1] 8942 val := v.Args[1] 8943 mem := v.Args[2] 8944 if !(ptr.Op != OpSB) { 8945 break 8946 } 8947 v.reset(OpAMD64MOVQstoreidx1) 8948 v.AuxInt = off 8949 v.Aux = sym 8950 v.AddArg(ptr) 8951 v.AddArg(idx) 8952 v.AddArg(val) 8953 v.AddArg(mem) 8954 return true 8955 } 8956 // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 8957 // cond: canMergeSym(sym1, sym2) 8958 // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8959 for { 8960 off1 := v.AuxInt 8961 sym1 := v.Aux 8962 _ = v.Args[2] 8963 v_0 := v.Args[0] 8964 if v_0.Op != OpAMD64LEAL { 8965 break 8966 } 8967 off2 := v_0.AuxInt 8968 sym2 := v_0.Aux 8969 base := v_0.Args[0] 8970 val := v.Args[1] 8971 mem := v.Args[2] 8972 if !(canMergeSym(sym1, sym2)) { 8973 break 8974 } 8975 v.reset(OpAMD64MOVQstore) 8976 v.AuxInt = off1 + off2 8977 v.Aux = mergeSym(sym1, sym2) 8978 v.AddArg(base) 8979 v.AddArg(val) 8980 v.AddArg(mem) 8981 return true 8982 } 8983 // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 8984 // cond: is32Bit(off1+off2) 8985 // result: (MOVQstore [off1+off2] {sym} ptr val mem) 8986 for { 8987 off1 := v.AuxInt 8988 sym := v.Aux 8989 _ = v.Args[2] 8990 v_0 := v.Args[0] 8991 if v_0.Op != OpAMD64ADDLconst { 8992 break 8993 } 8994 off2 := v_0.AuxInt 8995 ptr := v_0.Args[0] 8996 val := v.Args[1] 8997 mem := v.Args[2] 8998 if !(is32Bit(off1 + off2)) { 8999 break 9000 } 9001 v.reset(OpAMD64MOVQstore) 9002 v.AuxInt = off1 + off2 9003 v.Aux = sym 9004 v.AddArg(ptr) 9005 
v.AddArg(val) 9006 v.AddArg(mem) 9007 return true 9008 } 9009 return false 9010 } 9011 func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { 9012 // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 9013 // cond: ValAndOff(sc).canAdd(off) 9014 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9015 for { 9016 sc := v.AuxInt 9017 s := v.Aux 9018 _ = v.Args[1] 9019 v_0 := v.Args[0] 9020 if v_0.Op != OpAMD64ADDQconst { 9021 break 9022 } 9023 off := v_0.AuxInt 9024 ptr := v_0.Args[0] 9025 mem := v.Args[1] 9026 if !(ValAndOff(sc).canAdd(off)) { 9027 break 9028 } 9029 v.reset(OpAMD64MOVQstoreconst) 9030 v.AuxInt = ValAndOff(sc).add(off) 9031 v.Aux = s 9032 v.AddArg(ptr) 9033 v.AddArg(mem) 9034 return true 9035 } 9036 // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 9037 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 9038 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9039 for { 9040 sc := v.AuxInt 9041 sym1 := v.Aux 9042 _ = v.Args[1] 9043 v_0 := v.Args[0] 9044 if v_0.Op != OpAMD64LEAQ { 9045 break 9046 } 9047 off := v_0.AuxInt 9048 sym2 := v_0.Aux 9049 ptr := v_0.Args[0] 9050 mem := v.Args[1] 9051 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9052 break 9053 } 9054 v.reset(OpAMD64MOVQstoreconst) 9055 v.AuxInt = ValAndOff(sc).add(off) 9056 v.Aux = mergeSym(sym1, sym2) 9057 v.AddArg(ptr) 9058 v.AddArg(mem) 9059 return true 9060 } 9061 // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 9062 // cond: canMergeSym(sym1, sym2) 9063 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9064 for { 9065 x := v.AuxInt 9066 sym1 := v.Aux 9067 _ = v.Args[1] 9068 v_0 := v.Args[0] 9069 if v_0.Op != OpAMD64LEAQ1 { 9070 break 9071 } 9072 off := v_0.AuxInt 9073 sym2 := v_0.Aux 9074 _ = v_0.Args[1] 9075 ptr := v_0.Args[0] 9076 idx := v_0.Args[1] 9077 mem := v.Args[1] 9078 if !(canMergeSym(sym1, sym2)) { 9079 break 9080 } 9081 v.reset(OpAMD64MOVQstoreconstidx1) 9082 v.AuxInt = ValAndOff(x).add(off) 9083 v.Aux = mergeSym(sym1, sym2) 9084 v.AddArg(ptr) 9085 v.AddArg(idx) 9086 v.AddArg(mem) 9087 return true 9088 } 9089 // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) 9090 // cond: canMergeSym(sym1, sym2) 9091 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 9092 for { 9093 x := v.AuxInt 9094 sym1 := v.Aux 9095 _ = v.Args[1] 9096 v_0 := v.Args[0] 9097 if v_0.Op != OpAMD64LEAQ8 { 9098 break 9099 } 9100 off := v_0.AuxInt 9101 sym2 := v_0.Aux 9102 _ = v_0.Args[1] 9103 ptr := v_0.Args[0] 9104 idx := v_0.Args[1] 9105 mem := v.Args[1] 9106 if !(canMergeSym(sym1, sym2)) { 9107 break 9108 } 9109 v.reset(OpAMD64MOVQstoreconstidx8) 9110 v.AuxInt = ValAndOff(x).add(off) 9111 v.Aux = mergeSym(sym1, sym2) 9112 v.AddArg(ptr) 9113 v.AddArg(idx) 9114 v.AddArg(mem) 9115 return true 9116 } 9117 // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) 9118 // cond: 9119 // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) 9120 for { 9121 x := v.AuxInt 9122 sym := v.Aux 9123 _ = v.Args[1] 9124 v_0 := v.Args[0] 9125 if v_0.Op != OpAMD64ADDQ { 9126 break 9127 } 9128 _ = v_0.Args[1] 9129 ptr := v_0.Args[0] 9130 idx := v_0.Args[1] 9131 mem := v.Args[1] 9132 v.reset(OpAMD64MOVQstoreconstidx1) 9133 v.AuxInt = x 9134 v.Aux = sym 9135 v.AddArg(ptr) 9136 v.AddArg(idx) 9137 v.AddArg(mem) 9138 return true 9139 } 9140 // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 9141 // cond: canMergeSym(sym1, 
sym2) && ValAndOff(sc).canAdd(off) 9142 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 9143 for { 9144 sc := v.AuxInt 9145 sym1 := v.Aux 9146 _ = v.Args[1] 9147 v_0 := v.Args[0] 9148 if v_0.Op != OpAMD64LEAL { 9149 break 9150 } 9151 off := v_0.AuxInt 9152 sym2 := v_0.Aux 9153 ptr := v_0.Args[0] 9154 mem := v.Args[1] 9155 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 9156 break 9157 } 9158 v.reset(OpAMD64MOVQstoreconst) 9159 v.AuxInt = ValAndOff(sc).add(off) 9160 v.Aux = mergeSym(sym1, sym2) 9161 v.AddArg(ptr) 9162 v.AddArg(mem) 9163 return true 9164 } 9165 // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 9166 // cond: ValAndOff(sc).canAdd(off) 9167 // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 9168 for { 9169 sc := v.AuxInt 9170 s := v.Aux 9171 _ = v.Args[1] 9172 v_0 := v.Args[0] 9173 if v_0.Op != OpAMD64ADDLconst { 9174 break 9175 } 9176 off := v_0.AuxInt 9177 ptr := v_0.Args[0] 9178 mem := v.Args[1] 9179 if !(ValAndOff(sc).canAdd(off)) { 9180 break 9181 } 9182 v.reset(OpAMD64MOVQstoreconst) 9183 v.AuxInt = ValAndOff(sc).add(off) 9184 v.Aux = s 9185 v.AddArg(ptr) 9186 v.AddArg(mem) 9187 return true 9188 } 9189 return false 9190 } 9191 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { 9192 // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 9193 // cond: 9194 // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) 9195 for { 9196 c := v.AuxInt 9197 sym := v.Aux 9198 _ = v.Args[2] 9199 ptr := v.Args[0] 9200 v_1 := v.Args[1] 9201 if v_1.Op != OpAMD64SHLQconst { 9202 break 9203 } 9204 if v_1.AuxInt != 3 { 9205 break 9206 } 9207 idx := v_1.Args[0] 9208 mem := v.Args[2] 9209 v.reset(OpAMD64MOVQstoreconstidx8) 9210 v.AuxInt = c 9211 v.Aux = sym 9212 v.AddArg(ptr) 9213 v.AddArg(idx) 9214 v.AddArg(mem) 9215 return true 9216 } 9217 // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 9218 // cond: 9219 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9220 for { 9221 x := v.AuxInt 9222 sym := v.Aux 9223 _ = v.Args[2] 9224 v_0 := v.Args[0] 9225 if v_0.Op != OpAMD64ADDQconst { 9226 break 9227 } 9228 c := v_0.AuxInt 9229 ptr := v_0.Args[0] 9230 idx := v.Args[1] 9231 mem := v.Args[2] 9232 v.reset(OpAMD64MOVQstoreconstidx1) 9233 v.AuxInt = ValAndOff(x).add(c) 9234 v.Aux = sym 9235 v.AddArg(ptr) 9236 v.AddArg(idx) 9237 v.AddArg(mem) 9238 return true 9239 } 9240 // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 9241 // cond: 9242 // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9243 for { 9244 x := v.AuxInt 9245 sym := v.Aux 9246 _ = v.Args[2] 9247 ptr := v.Args[0] 9248 v_1 := v.Args[1] 9249 if v_1.Op != OpAMD64ADDQconst { 9250 break 9251 } 9252 c := v_1.AuxInt 9253 idx := v_1.Args[0] 9254 mem := v.Args[2] 9255 v.reset(OpAMD64MOVQstoreconstidx1) 9256 v.AuxInt = ValAndOff(x).add(c) 9257 v.Aux = sym 9258 v.AddArg(ptr) 9259 v.AddArg(idx) 9260 v.AddArg(mem) 9261 return true 9262 } 9263 return false 9264 } 9265 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { 9266 // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) 9267 // cond: 9268 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) 9269 for { 9270 x := v.AuxInt 9271 sym := v.Aux 9272 _ = v.Args[2] 9273 v_0 := v.Args[0] 9274 if v_0.Op != OpAMD64ADDQconst { 9275 break 9276 } 9277 c := v_0.AuxInt 9278 ptr := v_0.Args[0] 9279 idx := v.Args[1] 9280 mem := v.Args[2] 9281 v.reset(OpAMD64MOVQstoreconstidx8) 9282 
v.AuxInt = ValAndOff(x).add(c) 9283 v.Aux = sym 9284 v.AddArg(ptr) 9285 v.AddArg(idx) 9286 v.AddArg(mem) 9287 return true 9288 } 9289 // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) 9290 // cond: 9291 // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) 9292 for { 9293 x := v.AuxInt 9294 sym := v.Aux 9295 _ = v.Args[2] 9296 ptr := v.Args[0] 9297 v_1 := v.Args[1] 9298 if v_1.Op != OpAMD64ADDQconst { 9299 break 9300 } 9301 c := v_1.AuxInt 9302 idx := v_1.Args[0] 9303 mem := v.Args[2] 9304 v.reset(OpAMD64MOVQstoreconstidx8) 9305 v.AuxInt = ValAndOff(x).add(8 * c) 9306 v.Aux = sym 9307 v.AddArg(ptr) 9308 v.AddArg(idx) 9309 v.AddArg(mem) 9310 return true 9311 } 9312 return false 9313 } 9314 func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { 9315 // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 9316 // cond: 9317 // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) 9318 for { 9319 c := v.AuxInt 9320 sym := v.Aux 9321 _ = v.Args[3] 9322 ptr := v.Args[0] 9323 v_1 := v.Args[1] 9324 if v_1.Op != OpAMD64SHLQconst { 9325 break 9326 } 9327 if v_1.AuxInt != 3 { 9328 break 9329 } 9330 idx := v_1.Args[0] 9331 val := v.Args[2] 9332 mem := v.Args[3] 9333 v.reset(OpAMD64MOVQstoreidx8) 9334 v.AuxInt = c 9335 v.Aux = sym 9336 v.AddArg(ptr) 9337 v.AddArg(idx) 9338 v.AddArg(val) 9339 v.AddArg(mem) 9340 return true 9341 } 9342 // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9343 // cond: 9344 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 9345 for { 9346 c := v.AuxInt 9347 sym := v.Aux 9348 _ = v.Args[3] 9349 v_0 := v.Args[0] 9350 if v_0.Op != OpAMD64ADDQconst { 9351 break 9352 } 9353 d := v_0.AuxInt 9354 ptr := v_0.Args[0] 9355 idx := v.Args[1] 9356 val := v.Args[2] 9357 mem := v.Args[3] 9358 v.reset(OpAMD64MOVQstoreidx1) 9359 v.AuxInt = c + d 9360 v.Aux = sym 9361 v.AddArg(ptr) 9362 v.AddArg(idx) 9363 v.AddArg(val) 9364 v.AddArg(mem) 9365 return true 9366 } 9367 // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9368 // cond: 9369 // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) 9370 for { 9371 c := v.AuxInt 9372 sym := v.Aux 9373 _ = v.Args[3] 9374 ptr := v.Args[0] 9375 v_1 := v.Args[1] 9376 if v_1.Op != OpAMD64ADDQconst { 9377 break 9378 } 9379 d := v_1.AuxInt 9380 idx := v_1.Args[0] 9381 val := v.Args[2] 9382 mem := v.Args[3] 9383 v.reset(OpAMD64MOVQstoreidx1) 9384 v.AuxInt = c + d 9385 v.Aux = sym 9386 v.AddArg(ptr) 9387 v.AddArg(idx) 9388 v.AddArg(val) 9389 v.AddArg(mem) 9390 return true 9391 } 9392 return false 9393 } 9394 func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { 9395 // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9396 // cond: 9397 // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) 9398 for { 9399 c := v.AuxInt 9400 sym := v.Aux 9401 _ = v.Args[3] 9402 v_0 := v.Args[0] 9403 if v_0.Op != OpAMD64ADDQconst { 9404 break 9405 } 9406 d := v_0.AuxInt 9407 ptr := v_0.Args[0] 9408 idx := v.Args[1] 9409 val := v.Args[2] 9410 mem := v.Args[3] 9411 v.reset(OpAMD64MOVQstoreidx8) 9412 v.AuxInt = c + d 9413 v.Aux = sym 9414 v.AddArg(ptr) 9415 v.AddArg(idx) 9416 v.AddArg(val) 9417 v.AddArg(mem) 9418 return true 9419 } 9420 // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9421 // cond: 9422 // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) 9423 for { 9424 c := v.AuxInt 9425 sym := v.Aux 9426 _ = v.Args[3] 9427 ptr := v.Args[0] 9428 v_1 := v.Args[1] 9429 if v_1.Op != OpAMD64ADDQconst { 9430 break 9431 } 9432 d := v_1.AuxInt 9433 
idx := v_1.Args[0] 9434 val := v.Args[2] 9435 mem := v.Args[3] 9436 v.reset(OpAMD64MOVQstoreidx8) 9437 v.AuxInt = c + 8*d 9438 v.Aux = sym 9439 v.AddArg(ptr) 9440 v.AddArg(idx) 9441 v.AddArg(val) 9442 v.AddArg(mem) 9443 return true 9444 } 9445 return false 9446 } 9447 func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { 9448 // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) 9449 // cond: is32Bit(off1+off2) 9450 // result: (MOVSDload [off1+off2] {sym} ptr mem) 9451 for { 9452 off1 := v.AuxInt 9453 sym := v.Aux 9454 _ = v.Args[1] 9455 v_0 := v.Args[0] 9456 if v_0.Op != OpAMD64ADDQconst { 9457 break 9458 } 9459 off2 := v_0.AuxInt 9460 ptr := v_0.Args[0] 9461 mem := v.Args[1] 9462 if !(is32Bit(off1 + off2)) { 9463 break 9464 } 9465 v.reset(OpAMD64MOVSDload) 9466 v.AuxInt = off1 + off2 9467 v.Aux = sym 9468 v.AddArg(ptr) 9469 v.AddArg(mem) 9470 return true 9471 } 9472 // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9473 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9474 // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9475 for { 9476 off1 := v.AuxInt 9477 sym1 := v.Aux 9478 _ = v.Args[1] 9479 v_0 := v.Args[0] 9480 if v_0.Op != OpAMD64LEAQ { 9481 break 9482 } 9483 off2 := v_0.AuxInt 9484 sym2 := v_0.Aux 9485 base := v_0.Args[0] 9486 mem := v.Args[1] 9487 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9488 break 9489 } 9490 v.reset(OpAMD64MOVSDload) 9491 v.AuxInt = off1 + off2 9492 v.Aux = mergeSym(sym1, sym2) 9493 v.AddArg(base) 9494 v.AddArg(mem) 9495 return true 9496 } 9497 // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 9498 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9499 // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9500 for { 9501 off1 := v.AuxInt 9502 sym1 := v.Aux 9503 _ = v.Args[1] 9504 v_0 := v.Args[0] 9505 if v_0.Op != OpAMD64LEAQ1 { 9506 break 9507 } 9508 off2 := v_0.AuxInt 9509 sym2 := v_0.Aux 9510 _ = v_0.Args[1] 9511 ptr := v_0.Args[0] 9512 idx := v_0.Args[1] 9513 mem := v.Args[1] 9514 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9515 break 9516 } 9517 v.reset(OpAMD64MOVSDloadidx1) 9518 v.AuxInt = off1 + off2 9519 v.Aux = mergeSym(sym1, sym2) 9520 v.AddArg(ptr) 9521 v.AddArg(idx) 9522 v.AddArg(mem) 9523 return true 9524 } 9525 // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 9526 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9527 // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9528 for { 9529 off1 := v.AuxInt 9530 sym1 := v.Aux 9531 _ = v.Args[1] 9532 v_0 := v.Args[0] 9533 if v_0.Op != OpAMD64LEAQ8 { 9534 break 9535 } 9536 off2 := v_0.AuxInt 9537 sym2 := v_0.Aux 9538 _ = v_0.Args[1] 9539 ptr := v_0.Args[0] 9540 idx := v_0.Args[1] 9541 mem := v.Args[1] 9542 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9543 break 9544 } 9545 v.reset(OpAMD64MOVSDloadidx8) 9546 v.AuxInt = off1 + off2 9547 v.Aux = mergeSym(sym1, sym2) 9548 v.AddArg(ptr) 9549 v.AddArg(idx) 9550 v.AddArg(mem) 9551 return true 9552 } 9553 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) 9554 // cond: ptr.Op != OpSB 9555 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) 9556 for { 9557 off := v.AuxInt 9558 sym := v.Aux 9559 _ = v.Args[1] 9560 v_0 := v.Args[0] 9561 if v_0.Op != OpAMD64ADDQ { 9562 break 9563 } 9564 _ = v_0.Args[1] 9565 ptr := v_0.Args[0] 9566 idx := v_0.Args[1] 9567 mem := v.Args[1] 9568 if !(ptr.Op != OpSB) { 9569 break 9570 } 9571 v.reset(OpAMD64MOVSDloadidx1) 9572 v.AuxInt = off 9573 v.Aux = sym 9574 
v.AddArg(ptr) 9575 v.AddArg(idx) 9576 v.AddArg(mem) 9577 return true 9578 } 9579 return false 9580 } 9581 func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { 9582 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 9583 // cond: 9584 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) 9585 for { 9586 c := v.AuxInt 9587 sym := v.Aux 9588 _ = v.Args[2] 9589 ptr := v.Args[0] 9590 v_1 := v.Args[1] 9591 if v_1.Op != OpAMD64SHLQconst { 9592 break 9593 } 9594 if v_1.AuxInt != 3 { 9595 break 9596 } 9597 idx := v_1.Args[0] 9598 mem := v.Args[2] 9599 v.reset(OpAMD64MOVSDloadidx8) 9600 v.AuxInt = c 9601 v.Aux = sym 9602 v.AddArg(ptr) 9603 v.AddArg(idx) 9604 v.AddArg(mem) 9605 return true 9606 } 9607 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9608 // cond: 9609 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 9610 for { 9611 c := v.AuxInt 9612 sym := v.Aux 9613 _ = v.Args[2] 9614 v_0 := v.Args[0] 9615 if v_0.Op != OpAMD64ADDQconst { 9616 break 9617 } 9618 d := v_0.AuxInt 9619 ptr := v_0.Args[0] 9620 idx := v.Args[1] 9621 mem := v.Args[2] 9622 v.reset(OpAMD64MOVSDloadidx1) 9623 v.AuxInt = c + d 9624 v.Aux = sym 9625 v.AddArg(ptr) 9626 v.AddArg(idx) 9627 v.AddArg(mem) 9628 return true 9629 } 9630 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 9631 // cond: 9632 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) 9633 for { 9634 c := v.AuxInt 9635 sym := v.Aux 9636 _ = v.Args[2] 9637 ptr := v.Args[0] 9638 v_1 := v.Args[1] 9639 if v_1.Op != OpAMD64ADDQconst { 9640 break 9641 } 9642 d := v_1.AuxInt 9643 idx := v_1.Args[0] 9644 mem := v.Args[2] 9645 v.reset(OpAMD64MOVSDloadidx1) 9646 v.AuxInt = c + d 9647 v.Aux = sym 9648 v.AddArg(ptr) 9649 v.AddArg(idx) 9650 v.AddArg(mem) 9651 return true 9652 } 9653 return false 9654 } 9655 func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { 9656 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 9657 // cond: 9658 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) 9659 for { 9660 c := v.AuxInt 9661 sym := v.Aux 9662 _ = v.Args[2] 9663 v_0 := v.Args[0] 9664 if v_0.Op != OpAMD64ADDQconst { 9665 break 9666 } 9667 d := v_0.AuxInt 9668 ptr := v_0.Args[0] 9669 idx := v.Args[1] 9670 mem := v.Args[2] 9671 v.reset(OpAMD64MOVSDloadidx8) 9672 v.AuxInt = c + d 9673 v.Aux = sym 9674 v.AddArg(ptr) 9675 v.AddArg(idx) 9676 v.AddArg(mem) 9677 return true 9678 } 9679 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 9680 // cond: 9681 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) 9682 for { 9683 c := v.AuxInt 9684 sym := v.Aux 9685 _ = v.Args[2] 9686 ptr := v.Args[0] 9687 v_1 := v.Args[1] 9688 if v_1.Op != OpAMD64ADDQconst { 9689 break 9690 } 9691 d := v_1.AuxInt 9692 idx := v_1.Args[0] 9693 mem := v.Args[2] 9694 v.reset(OpAMD64MOVSDloadidx8) 9695 v.AuxInt = c + 8*d 9696 v.Aux = sym 9697 v.AddArg(ptr) 9698 v.AddArg(idx) 9699 v.AddArg(mem) 9700 return true 9701 } 9702 return false 9703 } 9704 func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { 9705 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9706 // cond: is32Bit(off1+off2) 9707 // result: (MOVSDstore [off1+off2] {sym} ptr val mem) 9708 for { 9709 off1 := v.AuxInt 9710 sym := v.Aux 9711 _ = v.Args[2] 9712 v_0 := v.Args[0] 9713 if v_0.Op != OpAMD64ADDQconst { 9714 break 9715 } 9716 off2 := v_0.AuxInt 9717 ptr := v_0.Args[0] 9718 val := v.Args[1] 9719 mem := v.Args[2] 9720 if !(is32Bit(off1 + off2)) { 9721 break 9722 } 9723 v.reset(OpAMD64MOVSDstore) 9724 v.AuxInt = off1 + off2 9725 v.Aux = sym 9726 
v.AddArg(ptr) 9727 v.AddArg(val) 9728 v.AddArg(mem) 9729 return true 9730 } 9731 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9732 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9733 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9734 for { 9735 off1 := v.AuxInt 9736 sym1 := v.Aux 9737 _ = v.Args[2] 9738 v_0 := v.Args[0] 9739 if v_0.Op != OpAMD64LEAQ { 9740 break 9741 } 9742 off2 := v_0.AuxInt 9743 sym2 := v_0.Aux 9744 base := v_0.Args[0] 9745 val := v.Args[1] 9746 mem := v.Args[2] 9747 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9748 break 9749 } 9750 v.reset(OpAMD64MOVSDstore) 9751 v.AuxInt = off1 + off2 9752 v.Aux = mergeSym(sym1, sym2) 9753 v.AddArg(base) 9754 v.AddArg(val) 9755 v.AddArg(mem) 9756 return true 9757 } 9758 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 9759 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9760 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9761 for { 9762 off1 := v.AuxInt 9763 sym1 := v.Aux 9764 _ = v.Args[2] 9765 v_0 := v.Args[0] 9766 if v_0.Op != OpAMD64LEAQ1 { 9767 break 9768 } 9769 off2 := v_0.AuxInt 9770 sym2 := v_0.Aux 9771 _ = v_0.Args[1] 9772 ptr := v_0.Args[0] 9773 idx := v_0.Args[1] 9774 val := v.Args[1] 9775 mem := v.Args[2] 9776 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9777 break 9778 } 9779 v.reset(OpAMD64MOVSDstoreidx1) 9780 v.AuxInt = off1 + off2 9781 v.Aux = mergeSym(sym1, sym2) 9782 v.AddArg(ptr) 9783 v.AddArg(idx) 9784 v.AddArg(val) 9785 v.AddArg(mem) 9786 return true 9787 } 9788 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 9789 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9790 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 9791 for { 9792 off1 := v.AuxInt 9793 sym1 := v.Aux 9794 _ = v.Args[2] 9795 v_0 := v.Args[0] 9796 if v_0.Op != OpAMD64LEAQ8 { 9797 break 9798 } 9799 off2 := v_0.AuxInt 9800 sym2 := v_0.Aux 9801 _ = v_0.Args[1] 9802 ptr := v_0.Args[0] 9803 idx := v_0.Args[1] 9804 val := v.Args[1] 9805 mem := v.Args[2] 9806 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9807 break 9808 } 9809 v.reset(OpAMD64MOVSDstoreidx8) 9810 v.AuxInt = off1 + off2 9811 v.Aux = mergeSym(sym1, sym2) 9812 v.AddArg(ptr) 9813 v.AddArg(idx) 9814 v.AddArg(val) 9815 v.AddArg(mem) 9816 return true 9817 } 9818 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) 9819 // cond: ptr.Op != OpSB 9820 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) 9821 for { 9822 off := v.AuxInt 9823 sym := v.Aux 9824 _ = v.Args[2] 9825 v_0 := v.Args[0] 9826 if v_0.Op != OpAMD64ADDQ { 9827 break 9828 } 9829 _ = v_0.Args[1] 9830 ptr := v_0.Args[0] 9831 idx := v_0.Args[1] 9832 val := v.Args[1] 9833 mem := v.Args[2] 9834 if !(ptr.Op != OpSB) { 9835 break 9836 } 9837 v.reset(OpAMD64MOVSDstoreidx1) 9838 v.AuxInt = off 9839 v.Aux = sym 9840 v.AddArg(ptr) 9841 v.AddArg(idx) 9842 v.AddArg(val) 9843 v.AddArg(mem) 9844 return true 9845 } 9846 return false 9847 } 9848 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { 9849 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 9850 // cond: 9851 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) 9852 for { 9853 c := v.AuxInt 9854 sym := v.Aux 9855 _ = v.Args[3] 9856 ptr := v.Args[0] 9857 v_1 := v.Args[1] 9858 if v_1.Op != OpAMD64SHLQconst { 9859 break 9860 } 9861 if v_1.AuxInt != 3 { 9862 break 9863 } 9864 idx := v_1.Args[0] 9865 val := v.Args[2] 9866 mem := v.Args[3] 9867 
v.reset(OpAMD64MOVSDstoreidx8) 9868 v.AuxInt = c 9869 v.Aux = sym 9870 v.AddArg(ptr) 9871 v.AddArg(idx) 9872 v.AddArg(val) 9873 v.AddArg(mem) 9874 return true 9875 } 9876 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9877 // cond: 9878 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 9879 for { 9880 c := v.AuxInt 9881 sym := v.Aux 9882 _ = v.Args[3] 9883 v_0 := v.Args[0] 9884 if v_0.Op != OpAMD64ADDQconst { 9885 break 9886 } 9887 d := v_0.AuxInt 9888 ptr := v_0.Args[0] 9889 idx := v.Args[1] 9890 val := v.Args[2] 9891 mem := v.Args[3] 9892 v.reset(OpAMD64MOVSDstoreidx1) 9893 v.AuxInt = c + d 9894 v.Aux = sym 9895 v.AddArg(ptr) 9896 v.AddArg(idx) 9897 v.AddArg(val) 9898 v.AddArg(mem) 9899 return true 9900 } 9901 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9902 // cond: 9903 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) 9904 for { 9905 c := v.AuxInt 9906 sym := v.Aux 9907 _ = v.Args[3] 9908 ptr := v.Args[0] 9909 v_1 := v.Args[1] 9910 if v_1.Op != OpAMD64ADDQconst { 9911 break 9912 } 9913 d := v_1.AuxInt 9914 idx := v_1.Args[0] 9915 val := v.Args[2] 9916 mem := v.Args[3] 9917 v.reset(OpAMD64MOVSDstoreidx1) 9918 v.AuxInt = c + d 9919 v.Aux = sym 9920 v.AddArg(ptr) 9921 v.AddArg(idx) 9922 v.AddArg(val) 9923 v.AddArg(mem) 9924 return true 9925 } 9926 return false 9927 } 9928 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { 9929 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9930 // cond: 9931 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) 9932 for { 9933 c := v.AuxInt 9934 sym := v.Aux 9935 _ = v.Args[3] 9936 v_0 := v.Args[0] 9937 if v_0.Op != OpAMD64ADDQconst { 9938 break 9939 } 9940 d := v_0.AuxInt 9941 ptr := v_0.Args[0] 9942 idx := v.Args[1] 9943 val := v.Args[2] 9944 mem := v.Args[3] 9945 v.reset(OpAMD64MOVSDstoreidx8) 9946 v.AuxInt = c + d 9947 v.Aux = sym 9948 v.AddArg(ptr) 9949 v.AddArg(idx) 9950 v.AddArg(val) 9951 v.AddArg(mem) 9952 return true 9953 } 9954 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9955 // cond: 9956 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) 9957 for { 9958 c := v.AuxInt 9959 sym := v.Aux 9960 _ = v.Args[3] 9961 ptr := v.Args[0] 9962 v_1 := v.Args[1] 9963 if v_1.Op != OpAMD64ADDQconst { 9964 break 9965 } 9966 d := v_1.AuxInt 9967 idx := v_1.Args[0] 9968 val := v.Args[2] 9969 mem := v.Args[3] 9970 v.reset(OpAMD64MOVSDstoreidx8) 9971 v.AuxInt = c + 8*d 9972 v.Aux = sym 9973 v.AddArg(ptr) 9974 v.AddArg(idx) 9975 v.AddArg(val) 9976 v.AddArg(mem) 9977 return true 9978 } 9979 return false 9980 } 9981 func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { 9982 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) 9983 // cond: is32Bit(off1+off2) 9984 // result: (MOVSSload [off1+off2] {sym} ptr mem) 9985 for { 9986 off1 := v.AuxInt 9987 sym := v.Aux 9988 _ = v.Args[1] 9989 v_0 := v.Args[0] 9990 if v_0.Op != OpAMD64ADDQconst { 9991 break 9992 } 9993 off2 := v_0.AuxInt 9994 ptr := v_0.Args[0] 9995 mem := v.Args[1] 9996 if !(is32Bit(off1 + off2)) { 9997 break 9998 } 9999 v.reset(OpAMD64MOVSSload) 10000 v.AuxInt = off1 + off2 10001 v.Aux = sym 10002 v.AddArg(ptr) 10003 v.AddArg(mem) 10004 return true 10005 } 10006 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 10007 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 10008 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) 10009 for { 10010 off1 := v.AuxInt 10011 sym1 := v.Aux 10012 _ = v.Args[1] 10013 v_0 := v.Args[0] 10014 if v_0.Op 
func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool {
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
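// Note the asymmetry between the two ADDQconst folds in the *idx4 rules
// above: a constant added to the pointer moves into the displacement
// unscaled (c+d), while a constant added to the index is multiplied by the
// element size (c+4*d), because the hardware computes ptr + c + 4*idx and
// ptr + c + 4*(idx+d) = ptr + (c+4*d) + 4*idx.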
func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX x:(MOVWQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX x:(MOVBQSX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQSX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool {
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx2 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xffff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX x:(MOVWQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX x:(MOVBQZX _))
	// cond:
	// result: x
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBQZX {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
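// MOVWQZX mirrors MOVWQSX: zero-extending a narrow load is folded into the
// load itself (MOVWload already zero-extends to 64 bits), an ANDLconst
// absorbs the extension by masking with 0xffff, and re-extending an
// already-extended value reduces to a copy. The x.Uses == 1 && clobber(x)
// guard ensures the original load has no other consumers before it is
// rebuilt in its own block via @x.Block.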
func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
	// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond:
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
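// MOVWloadidx1 treats ptr and idx symmetrically, so the rules come in
// commuted pairs: the (SHLQconst [1] idx) operand promoting the load to
// MOVWloadidx2 may appear in either argument slot, and an ADDQconst around
// either operand folds to (MOVWloadidx1 [c+d] {sym} ptr idx mem).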
func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
	// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond:
	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond:
	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 16 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
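// The final two MOVWstore rules perform store combining: a 16-bit store of
// (SHRQconst [16] w) at offset i paired with a 16-bit store of w at i-2
// writes the low 32 bits of w contiguously, so on little-endian AMD64 the
// pair is equivalent to a single (MOVLstore [i-2] {s} p w mem). The second
// variant handles a pair of shifted halves (SHRQconst [j] and [j-16]) the
// same way, keeping the lower shift w0 as the stored value.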
func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
	// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool {
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
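// MOVWstoreconst packs its immediate and its displacement into one AuxInt
// as a ValAndOff, so offset folding goes through ValAndOff(sc).add(off)
// guarded by canAdd rather than plain addition. The pair-combining rule
// merges two adjacent 2-byte constant stores into one MOVLstoreconst whose
// value is ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, i.e. the
// constant at the lower address supplies the low half of the 32-bit word.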
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond:
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(2 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx2 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 1
		v0.AddArg(i)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
	// cond:
	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
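// These are the indexed counterparts of the MOVWstore combining rules: the
// same adjacent-halfword pattern, with both p and idx required to match
// between the two stores, collapses into a single MOVLstoreidx1.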
func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond:
	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond:
	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
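// MOVWstoreidx2 has no scale-2 32-bit analogue, so when two such stores
// combine, the result falls back to MOVLstoreidx1 and the index is rescaled
// explicitly with a freshly built (SHLQconst <idx.Type> [1] idx): 2*idx at
// the old scale equals (idx<<1) at scale 1.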
func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool {
	// match: (MULL x (MOVLconst [c]))
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULL (MOVLconst [c]) x)
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool {
	// match: (MULLconst [c] (MULLconst [d] x))
	// cond:
	// result: (MULLconst [int64(int32(c * d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int64(int32(c * d))
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c*d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c * d))
		return true
	}
	return false
}
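// The MULLconst folds wrap the product through int64(int32(c * d)) so that
// compile-time constant folding overflows exactly like the 32-bit IMUL it
// replaces; e.g. c = d = 0x10000 folds to 0 rather than 0x100000000.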
func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool {
	// match: (MULLconst [c] (MULLconst [d] x))
	// cond:
	// result: (MULLconst [int64(int32(c * d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int64(int32(c * d))
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c*d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c * d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool {
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(c*d)
	// result: (MULQconst [c * d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c * d)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c * d
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [-1] x)
	// cond:
	// result: (NEGQ x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (MULQconst [1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [3] x)
	// cond:
	// result: (LEAQ2 x x)
	for {
		if v.AuxInt != 3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [5] x)
	// cond:
	// result: (LEAQ4 x x)
	for {
		if v.AuxInt != 5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [7] x)
	// cond:
	// result: (LEAQ8 (NEGQ <v.Type> x) x)
	for {
		if v.AuxInt != 7 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [9] x)
	// cond:
	// result: (LEAQ8 x x)
	for {
		if v.AuxInt != 9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [11] x)
	// cond:
	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 11 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [13] x)
	// cond:
	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 13 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
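// The small odd multipliers above lean on the LEA semantics
// LEAQ1 a b = a+b, LEAQ2 a b = a+2*b, LEAQ4 a b = a+4*b, LEAQ8 a b = a+8*b
// (see gen/AMD64Ops.go), so for example:
//
//	3*x  = LEAQ2 x x             = x + 2*x
//	7*x  = LEAQ8 (NEGQ x) x      = -x + 8*x
//	11*x = LEAQ2 x (LEAQ4 x x)   = x + 2*(x + 4*x)
//
// The equalities are just the arithmetic spelled out, not extra rules.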
func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [21] x)
	// cond:
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 21 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [25] x)
	// cond:
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 25 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [37] x)
	// cond:
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 37 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [41] x)
	// cond:
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 41 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [73] x)
	// cond:
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 73 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c)
	// result: (SHLQconst [log2(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c)
		v.AddArg(x)
		return true
	}
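	// The next few rules cover c = 2^k-1, 2^k+1, 2^k+2, 2^k+4 (and 2^k+8 in
	// the following chunk) with one shift plus one SUBQ/LEAQn, e.g.
	// 15*x = (x<<4) - x and 17*x = LEAQ1 (x<<4) x = (x<<4) + x. The
	// c >= 15/17/34/68 floors appear to keep these generic forms from
	// shadowing the cheaper small-constant rules above (a reading of the
	// rule order, not something stated in this file).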
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c + 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 2)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 4)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 8)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo(c/3)
	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 3)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo(c/5)
	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 5)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo(c/9)
	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 9)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c*d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c * d
		return true
	}
	return false
}
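// For composite multipliers, the c%3/c%5/c%9 rules above factor c as
// (3, 5, or 9) * 2^n and combine one LEA with one shift, e.g.
// 24*x = (3*x)<<3 = SHLQconst [3] (LEAQ2 x x). Anything the MULQconst
// ladder does not catch is simply left as MULQconst and compiled as a
// real multiply instruction.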
func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool {
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool {
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
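// The MULSD/MULSS pairs above fold a float load into the multiply
// (MULSDmem/MULSSmem, i.e. the multiply with a memory operand) when
// canMergeLoad reports that absorbing the load is safe; clobber(l) then
// marks the load dead so dead-code elimination can drop it. The exact
// safety conditions live in canMergeLoad in rewrite.go.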
func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool {
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool {
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
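// In the second NEGQ rule above, -(ADDQconst [c] (NEGQ x)) = -(c - x) = x - c,
// hence the ADDQconst [-c] x result. The c != -(1<<31) guard matters because
// ADDQconst immediates are 32-bit: negating math.MinInt32 would not fit.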
func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool {
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool {
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool {
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
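	// The constant-rotate rules above recognize x<<c | x>>(width-c) as a
	// ROLconst at each width. The t.Size() checks are needed because ORL
	// and SHLLconst are also used for 16- and 8-bit values, with the narrow
	// width carried only in the value's type. The rules below (and the
	// following ORL chunks) handle the variable-amount forms.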
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
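// The large patterns above and in the ORL chunks that follow appear to be
// the shape x<<s | x>>(32-s) takes after generic shift lowering masks the
// shift amounts: SBBLcarrymask materializes 0 or ^0 from the carry flag of
// an unsigned compare, so the ANDL zeroes the right-shift half exactly when
// 32-s would be an out-of-range shift. Matching every commutation of the
// ORL and ANDL operands folds the whole expression back into one ROLL/RORL.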
func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
	// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
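// Note on layout: the rule generator appears to split an op's rules into
// functions of at most ten rules each (the _0, _10, _20, ... suffixes), and
// the rewriteValueAMD64 dispatcher chains them with ||, so rule order is
// preserved across chunks.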
func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
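// For 16-bit rotates, the rotate amount is first masked with y&15
// (ANDQconst/ANDLconst [15]), and the v.Type.Size() == 2 condition limits
// the rewrite to values that really are 16-bit, since the SHLL/ORL ops
// themselves are 32-bit. The ROLB rules later in this function family use
// the same scheme with a 7 mask and Size() == 1.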
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
	// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
	// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
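	// The rules below apply the same rotate recognition to 8-bit values:
	// the count is masked with ANDQconst/ANDLconst [7], its complement is
	// NEGQ/NEGL (ADDQconst/ADDLconst ... [-8]), and the matched tree is
	// replaced by ROLB. Only the constants and the byte-width shift ops
	// differ from the 16-bit cases above.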
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
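// The generated rules for ORL continue in ORL_40; each chunk returns false
// when none of its rules fire so that the next chunk can be tried. ORL_40
// finishes the byte-rotate patterns and then begins the load-combining
// rules.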
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
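	// The RORB rules below are the mirror image of the ROLB rules above:
	// the masked count feeds the right shift (SHRB) and its complement
	// feeds the left shift, so the matched tree is a right rotate.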
	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
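	// The remaining ORL rules merge adjacent narrow loads into one wider
	// load, which is valid on this little-endian target. As a sketch
	// (illustrative source, not part of this file):
	//
	//	u := uint16(b[0]) | uint16(b[1])<<8 // what binary.LittleEndian.Uint16(b) computes
	//
	// becomes a single 16-bit load: the pattern pairs a MOVBload at offset
	// i0 with a MOVBload at i0+1 shifted left by 8, and fires only when
	// every intermediate value has a single use, the merged load can be
	// placed in a common block (mergePoint != nil), and the old values can
	// be discarded (clobber).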
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
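	// The next rules handle the same byte merge inside a longer OR chain:
	// two SHLLconst terms whose shifts differ by 8 (j1 == j0+8, with
	// j0 % 16 == 0) are folded into one shifted MOVWload, and the remaining
	// operand y is re-attached to the rebuilt ORL. The four variants cover
	// the commuted operand orders.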
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
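	// From here on the pair merging is repeated for indexed loads
	// (MOVBloadidx1, and later MOVWloadidx1). Because an idx1 operation
	// accepts its pointer and index in either order, every rule is emitted
	// once per (p idx)/(idx p) combination, which accounts for the long run
	// of near-identical matches.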
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
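// ORL_60 continues the indexed-load merges: byte pairs become
// MOVWloadidx1, then 16-bit pairs under SHLLconst [16] become
// MOVLloadidx1.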
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
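// ORL_70 handles the remaining commuted forms of the MOVWloadidx1 merge.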
x1.Args[2] 17510 x0 := v.Args[1] 17511 if x0.Op != OpAMD64MOVWloadidx1 { 17512 break 17513 } 17514 i0 := x0.AuxInt 17515 if x0.Aux != s { 17516 break 17517 } 17518 _ = x0.Args[2] 17519 if p != x0.Args[0] { 17520 break 17521 } 17522 if idx != x0.Args[1] { 17523 break 17524 } 17525 if mem != x0.Args[2] { 17526 break 17527 } 17528 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17529 break 17530 } 17531 b = mergePoint(b, x0, x1) 17532 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 17533 v.reset(OpCopy) 17534 v.AddArg(v0) 17535 v0.AuxInt = i0 17536 v0.Aux = s 17537 v0.AddArg(p) 17538 v0.AddArg(idx) 17539 v0.AddArg(mem) 17540 return true 17541 } 17542 return false 17543 } 17544 func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool { 17545 b := v.Block 17546 _ = b 17547 typ := &b.Func.Config.Types 17548 _ = typ 17549 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 17550 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17551 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 17552 for { 17553 _ = v.Args[1] 17554 sh := v.Args[0] 17555 if sh.Op != OpAMD64SHLLconst { 17556 break 17557 } 17558 if sh.AuxInt != 16 { 17559 break 17560 } 17561 x1 := sh.Args[0] 17562 if x1.Op != OpAMD64MOVWloadidx1 { 17563 break 17564 } 17565 i1 := x1.AuxInt 17566 s := x1.Aux 17567 _ = x1.Args[2] 17568 idx := x1.Args[0] 17569 p := x1.Args[1] 17570 mem := x1.Args[2] 17571 x0 := v.Args[1] 17572 if x0.Op != OpAMD64MOVWloadidx1 { 17573 break 17574 } 17575 i0 := x0.AuxInt 17576 if x0.Aux != s { 17577 break 17578 } 17579 _ = x0.Args[2] 17580 if p != x0.Args[0] { 17581 break 17582 } 17583 if idx != x0.Args[1] { 17584 break 17585 } 17586 if mem != x0.Args[2] { 17587 break 17588 } 17589 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17590 break 17591 } 17592 b = mergePoint(b, x0, x1) 17593 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 17594 v.reset(OpCopy) 17595 v.AddArg(v0) 17596 v0.AuxInt = i0 17597 v0.Aux = s 17598 v0.AddArg(p) 17599 v0.AddArg(idx) 17600 v0.AddArg(mem) 17601 return true 17602 } 17603 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 17604 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17605 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 17606 for { 17607 _ = v.Args[1] 17608 sh := v.Args[0] 17609 if sh.Op != OpAMD64SHLLconst { 17610 break 17611 } 17612 if sh.AuxInt != 16 { 17613 break 17614 } 17615 x1 := sh.Args[0] 17616 if x1.Op != OpAMD64MOVWloadidx1 { 17617 break 17618 } 17619 i1 := x1.AuxInt 17620 s := x1.Aux 17621 _ = x1.Args[2] 17622 p := x1.Args[0] 17623 idx := x1.Args[1] 17624 mem := x1.Args[2] 17625 x0 := v.Args[1] 17626 if x0.Op != OpAMD64MOVWloadidx1 { 17627 break 17628 } 17629 i0 := x0.AuxInt 17630 if x0.Aux != s { 17631 break 17632 } 17633 _ = x0.Args[2] 17634 if idx != x0.Args[0] { 17635 break 17636 } 17637 if p != x0.Args[1] { 17638 break 17639 } 17640 if mem != x0.Args[2] { 17641 break 17642 } 17643 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17644 
break 17645 } 17646 b = mergePoint(b, x0, x1) 17647 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 17648 v.reset(OpCopy) 17649 v.AddArg(v0) 17650 v0.AuxInt = i0 17651 v0.Aux = s 17652 v0.AddArg(p) 17653 v0.AddArg(idx) 17654 v0.AddArg(mem) 17655 return true 17656 } 17657 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 17658 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 17659 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 17660 for { 17661 _ = v.Args[1] 17662 sh := v.Args[0] 17663 if sh.Op != OpAMD64SHLLconst { 17664 break 17665 } 17666 if sh.AuxInt != 16 { 17667 break 17668 } 17669 x1 := sh.Args[0] 17670 if x1.Op != OpAMD64MOVWloadidx1 { 17671 break 17672 } 17673 i1 := x1.AuxInt 17674 s := x1.Aux 17675 _ = x1.Args[2] 17676 idx := x1.Args[0] 17677 p := x1.Args[1] 17678 mem := x1.Args[2] 17679 x0 := v.Args[1] 17680 if x0.Op != OpAMD64MOVWloadidx1 { 17681 break 17682 } 17683 i0 := x0.AuxInt 17684 if x0.Aux != s { 17685 break 17686 } 17687 _ = x0.Args[2] 17688 if idx != x0.Args[0] { 17689 break 17690 } 17691 if p != x0.Args[1] { 17692 break 17693 } 17694 if mem != x0.Args[2] { 17695 break 17696 } 17697 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 17698 break 17699 } 17700 b = mergePoint(b, x0, x1) 17701 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 17702 v.reset(OpCopy) 17703 v.AddArg(v0) 17704 v0.AuxInt = i0 17705 v0.Aux = s 17706 v0.AddArg(p) 17707 v0.AddArg(idx) 17708 v0.AddArg(mem) 17709 return true 17710 } 17711 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 17712 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17713 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 17714 for { 17715 _ = v.Args[1] 17716 s1 := v.Args[0] 17717 if s1.Op != OpAMD64SHLLconst { 17718 break 17719 } 17720 j1 := s1.AuxInt 17721 x1 := s1.Args[0] 17722 if x1.Op != OpAMD64MOVBloadidx1 { 17723 break 17724 } 17725 i1 := x1.AuxInt 17726 s := x1.Aux 17727 _ = x1.Args[2] 17728 p := x1.Args[0] 17729 idx := x1.Args[1] 17730 mem := x1.Args[2] 17731 or := v.Args[1] 17732 if or.Op != OpAMD64ORL { 17733 break 17734 } 17735 _ = or.Args[1] 17736 s0 := or.Args[0] 17737 if s0.Op != OpAMD64SHLLconst { 17738 break 17739 } 17740 j0 := s0.AuxInt 17741 x0 := s0.Args[0] 17742 if x0.Op != OpAMD64MOVBloadidx1 { 17743 break 17744 } 17745 i0 := x0.AuxInt 17746 if x0.Aux != s { 17747 break 17748 } 17749 _ = x0.Args[2] 17750 if p != x0.Args[0] { 17751 break 17752 } 17753 if idx != x0.Args[1] { 17754 break 17755 } 17756 if mem != x0.Args[2] { 17757 break 17758 } 17759 y := or.Args[1] 17760 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17761 break 17762 } 17763 b = mergePoint(b, x0, x1) 17764 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17765 v.reset(OpCopy) 17766 v.AddArg(v0) 17767 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 
17768 v1.AuxInt = j0 17769 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 17770 v2.AuxInt = i0 17771 v2.Aux = s 17772 v2.AddArg(p) 17773 v2.AddArg(idx) 17774 v2.AddArg(mem) 17775 v1.AddArg(v2) 17776 v0.AddArg(v1) 17777 v0.AddArg(y) 17778 return true 17779 } 17780 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 17781 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17782 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 17783 for { 17784 _ = v.Args[1] 17785 s1 := v.Args[0] 17786 if s1.Op != OpAMD64SHLLconst { 17787 break 17788 } 17789 j1 := s1.AuxInt 17790 x1 := s1.Args[0] 17791 if x1.Op != OpAMD64MOVBloadidx1 { 17792 break 17793 } 17794 i1 := x1.AuxInt 17795 s := x1.Aux 17796 _ = x1.Args[2] 17797 idx := x1.Args[0] 17798 p := x1.Args[1] 17799 mem := x1.Args[2] 17800 or := v.Args[1] 17801 if or.Op != OpAMD64ORL { 17802 break 17803 } 17804 _ = or.Args[1] 17805 s0 := or.Args[0] 17806 if s0.Op != OpAMD64SHLLconst { 17807 break 17808 } 17809 j0 := s0.AuxInt 17810 x0 := s0.Args[0] 17811 if x0.Op != OpAMD64MOVBloadidx1 { 17812 break 17813 } 17814 i0 := x0.AuxInt 17815 if x0.Aux != s { 17816 break 17817 } 17818 _ = x0.Args[2] 17819 if p != x0.Args[0] { 17820 break 17821 } 17822 if idx != x0.Args[1] { 17823 break 17824 } 17825 if mem != x0.Args[2] { 17826 break 17827 } 17828 y := or.Args[1] 17829 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17830 break 17831 } 17832 b = mergePoint(b, x0, x1) 17833 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17834 v.reset(OpCopy) 17835 v.AddArg(v0) 17836 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17837 v1.AuxInt = j0 17838 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 17839 v2.AuxInt = i0 17840 v2.Aux = s 17841 v2.AddArg(p) 17842 v2.AddArg(idx) 17843 v2.AddArg(mem) 17844 v1.AddArg(v2) 17845 v0.AddArg(v1) 17846 v0.AddArg(y) 17847 return true 17848 } 17849 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 17850 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17851 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 17852 for { 17853 _ = v.Args[1] 17854 s1 := v.Args[0] 17855 if s1.Op != OpAMD64SHLLconst { 17856 break 17857 } 17858 j1 := s1.AuxInt 17859 x1 := s1.Args[0] 17860 if x1.Op != OpAMD64MOVBloadidx1 { 17861 break 17862 } 17863 i1 := x1.AuxInt 17864 s := x1.Aux 17865 _ = x1.Args[2] 17866 p := x1.Args[0] 17867 idx := x1.Args[1] 17868 mem := x1.Args[2] 17869 or := v.Args[1] 17870 if or.Op != OpAMD64ORL { 17871 break 17872 } 17873 _ = or.Args[1] 17874 s0 := or.Args[0] 17875 if s0.Op != OpAMD64SHLLconst { 17876 break 17877 } 17878 j0 := s0.AuxInt 17879 x0 := s0.Args[0] 17880 if x0.Op != OpAMD64MOVBloadidx1 { 17881 break 17882 } 17883 i0 := x0.AuxInt 17884 if x0.Aux != s { 17885 break 17886 } 
17887 _ = x0.Args[2] 17888 if idx != x0.Args[0] { 17889 break 17890 } 17891 if p != x0.Args[1] { 17892 break 17893 } 17894 if mem != x0.Args[2] { 17895 break 17896 } 17897 y := or.Args[1] 17898 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17899 break 17900 } 17901 b = mergePoint(b, x0, x1) 17902 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17903 v.reset(OpCopy) 17904 v.AddArg(v0) 17905 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17906 v1.AuxInt = j0 17907 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 17908 v2.AuxInt = i0 17909 v2.Aux = s 17910 v2.AddArg(p) 17911 v2.AddArg(idx) 17912 v2.AddArg(mem) 17913 v1.AddArg(v2) 17914 v0.AddArg(v1) 17915 v0.AddArg(y) 17916 return true 17917 } 17918 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 17919 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17920 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 17921 for { 17922 _ = v.Args[1] 17923 s1 := v.Args[0] 17924 if s1.Op != OpAMD64SHLLconst { 17925 break 17926 } 17927 j1 := s1.AuxInt 17928 x1 := s1.Args[0] 17929 if x1.Op != OpAMD64MOVBloadidx1 { 17930 break 17931 } 17932 i1 := x1.AuxInt 17933 s := x1.Aux 17934 _ = x1.Args[2] 17935 idx := x1.Args[0] 17936 p := x1.Args[1] 17937 mem := x1.Args[2] 17938 or := v.Args[1] 17939 if or.Op != OpAMD64ORL { 17940 break 17941 } 17942 _ = or.Args[1] 17943 s0 := or.Args[0] 17944 if s0.Op != OpAMD64SHLLconst { 17945 break 17946 } 17947 j0 := s0.AuxInt 17948 x0 := s0.Args[0] 17949 if x0.Op != OpAMD64MOVBloadidx1 { 17950 break 17951 } 17952 i0 := x0.AuxInt 17953 if x0.Aux != s { 17954 break 17955 } 17956 _ = x0.Args[2] 17957 if idx != x0.Args[0] { 17958 break 17959 } 17960 if p != x0.Args[1] { 17961 break 17962 } 17963 if mem != x0.Args[2] { 17964 break 17965 } 17966 y := or.Args[1] 17967 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 17968 break 17969 } 17970 b = mergePoint(b, x0, x1) 17971 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 17972 v.reset(OpCopy) 17973 v.AddArg(v0) 17974 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 17975 v1.AuxInt = j0 17976 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 17977 v2.AuxInt = i0 17978 v2.Aux = s 17979 v2.AddArg(p) 17980 v2.AddArg(idx) 17981 v2.AddArg(mem) 17982 v1.AddArg(v2) 17983 v0.AddArg(v1) 17984 v0.AddArg(y) 17985 return true 17986 } 17987 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 17988 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 17989 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 17990 for { 17991 _ = v.Args[1] 17992 s1 := v.Args[0] 17993 
if s1.Op != OpAMD64SHLLconst { 17994 break 17995 } 17996 j1 := s1.AuxInt 17997 x1 := s1.Args[0] 17998 if x1.Op != OpAMD64MOVBloadidx1 { 17999 break 18000 } 18001 i1 := x1.AuxInt 18002 s := x1.Aux 18003 _ = x1.Args[2] 18004 p := x1.Args[0] 18005 idx := x1.Args[1] 18006 mem := x1.Args[2] 18007 or := v.Args[1] 18008 if or.Op != OpAMD64ORL { 18009 break 18010 } 18011 _ = or.Args[1] 18012 y := or.Args[0] 18013 s0 := or.Args[1] 18014 if s0.Op != OpAMD64SHLLconst { 18015 break 18016 } 18017 j0 := s0.AuxInt 18018 x0 := s0.Args[0] 18019 if x0.Op != OpAMD64MOVBloadidx1 { 18020 break 18021 } 18022 i0 := x0.AuxInt 18023 if x0.Aux != s { 18024 break 18025 } 18026 _ = x0.Args[2] 18027 if p != x0.Args[0] { 18028 break 18029 } 18030 if idx != x0.Args[1] { 18031 break 18032 } 18033 if mem != x0.Args[2] { 18034 break 18035 } 18036 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18037 break 18038 } 18039 b = mergePoint(b, x0, x1) 18040 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18041 v.reset(OpCopy) 18042 v.AddArg(v0) 18043 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18044 v1.AuxInt = j0 18045 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18046 v2.AuxInt = i0 18047 v2.Aux = s 18048 v2.AddArg(p) 18049 v2.AddArg(idx) 18050 v2.AddArg(mem) 18051 v1.AddArg(v2) 18052 v0.AddArg(v1) 18053 v0.AddArg(y) 18054 return true 18055 } 18056 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 18057 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18058 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18059 for { 18060 _ = v.Args[1] 18061 s1 := v.Args[0] 18062 if s1.Op != OpAMD64SHLLconst { 18063 break 18064 } 18065 j1 := s1.AuxInt 18066 x1 := s1.Args[0] 18067 if x1.Op != OpAMD64MOVBloadidx1 { 18068 break 18069 } 18070 i1 := x1.AuxInt 18071 s := x1.Aux 18072 _ = x1.Args[2] 18073 idx := x1.Args[0] 18074 p := x1.Args[1] 18075 mem := x1.Args[2] 18076 or := v.Args[1] 18077 if or.Op != OpAMD64ORL { 18078 break 18079 } 18080 _ = or.Args[1] 18081 y := or.Args[0] 18082 s0 := or.Args[1] 18083 if s0.Op != OpAMD64SHLLconst { 18084 break 18085 } 18086 j0 := s0.AuxInt 18087 x0 := s0.Args[0] 18088 if x0.Op != OpAMD64MOVBloadidx1 { 18089 break 18090 } 18091 i0 := x0.AuxInt 18092 if x0.Aux != s { 18093 break 18094 } 18095 _ = x0.Args[2] 18096 if p != x0.Args[0] { 18097 break 18098 } 18099 if idx != x0.Args[1] { 18100 break 18101 } 18102 if mem != x0.Args[2] { 18103 break 18104 } 18105 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18106 break 18107 } 18108 b = mergePoint(b, x0, x1) 18109 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18110 v.reset(OpCopy) 18111 v.AddArg(v0) 18112 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18113 v1.AuxInt = j0 18114 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18115 v2.AuxInt = i0 18116 v2.Aux = s 18117 v2.AddArg(p) 18118 v2.AddArg(idx) 18119 v2.AddArg(mem) 18120 
v1.AddArg(v2) 18121 v0.AddArg(v1) 18122 v0.AddArg(y) 18123 return true 18124 } 18125 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 18126 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18127 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18128 for { 18129 _ = v.Args[1] 18130 s1 := v.Args[0] 18131 if s1.Op != OpAMD64SHLLconst { 18132 break 18133 } 18134 j1 := s1.AuxInt 18135 x1 := s1.Args[0] 18136 if x1.Op != OpAMD64MOVBloadidx1 { 18137 break 18138 } 18139 i1 := x1.AuxInt 18140 s := x1.Aux 18141 _ = x1.Args[2] 18142 p := x1.Args[0] 18143 idx := x1.Args[1] 18144 mem := x1.Args[2] 18145 or := v.Args[1] 18146 if or.Op != OpAMD64ORL { 18147 break 18148 } 18149 _ = or.Args[1] 18150 y := or.Args[0] 18151 s0 := or.Args[1] 18152 if s0.Op != OpAMD64SHLLconst { 18153 break 18154 } 18155 j0 := s0.AuxInt 18156 x0 := s0.Args[0] 18157 if x0.Op != OpAMD64MOVBloadidx1 { 18158 break 18159 } 18160 i0 := x0.AuxInt 18161 if x0.Aux != s { 18162 break 18163 } 18164 _ = x0.Args[2] 18165 if idx != x0.Args[0] { 18166 break 18167 } 18168 if p != x0.Args[1] { 18169 break 18170 } 18171 if mem != x0.Args[2] { 18172 break 18173 } 18174 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18175 break 18176 } 18177 b = mergePoint(b, x0, x1) 18178 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18179 v.reset(OpCopy) 18180 v.AddArg(v0) 18181 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18182 v1.AuxInt = j0 18183 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18184 v2.AuxInt = i0 18185 v2.Aux = s 18186 v2.AddArg(p) 18187 v2.AddArg(idx) 18188 v2.AddArg(mem) 18189 v1.AddArg(v2) 18190 v0.AddArg(v1) 18191 v0.AddArg(y) 18192 return true 18193 } 18194 return false 18195 } 18196 func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool { 18197 b := v.Block 18198 _ = b 18199 typ := &b.Func.Config.Types 18200 _ = typ 18201 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 18202 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18203 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18204 for { 18205 _ = v.Args[1] 18206 s1 := v.Args[0] 18207 if s1.Op != OpAMD64SHLLconst { 18208 break 18209 } 18210 j1 := s1.AuxInt 18211 x1 := s1.Args[0] 18212 if x1.Op != OpAMD64MOVBloadidx1 { 18213 break 18214 } 18215 i1 := x1.AuxInt 18216 s := x1.Aux 18217 _ = x1.Args[2] 18218 idx := x1.Args[0] 18219 p := x1.Args[1] 18220 mem := x1.Args[2] 18221 or := v.Args[1] 18222 if or.Op != OpAMD64ORL { 18223 break 18224 } 18225 _ = or.Args[1] 18226 y := or.Args[0] 18227 s0 := or.Args[1] 18228 if s0.Op != OpAMD64SHLLconst { 18229 break 18230 } 18231 j0 := s0.AuxInt 18232 x0 := s0.Args[0] 18233 if x0.Op != OpAMD64MOVBloadidx1 { 18234 break 18235 } 18236 i0 := x0.AuxInt 18237 if x0.Aux != s { 18238 break 18239 } 
18240 _ = x0.Args[2] 18241 if idx != x0.Args[0] { 18242 break 18243 } 18244 if p != x0.Args[1] { 18245 break 18246 } 18247 if mem != x0.Args[2] { 18248 break 18249 } 18250 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18251 break 18252 } 18253 b = mergePoint(b, x0, x1) 18254 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18255 v.reset(OpCopy) 18256 v.AddArg(v0) 18257 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18258 v1.AuxInt = j0 18259 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18260 v2.AuxInt = i0 18261 v2.Aux = s 18262 v2.AddArg(p) 18263 v2.AddArg(idx) 18264 v2.AddArg(mem) 18265 v1.AddArg(v2) 18266 v0.AddArg(v1) 18267 v0.AddArg(y) 18268 return true 18269 } 18270 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18271 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18272 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18273 for { 18274 _ = v.Args[1] 18275 or := v.Args[0] 18276 if or.Op != OpAMD64ORL { 18277 break 18278 } 18279 _ = or.Args[1] 18280 s0 := or.Args[0] 18281 if s0.Op != OpAMD64SHLLconst { 18282 break 18283 } 18284 j0 := s0.AuxInt 18285 x0 := s0.Args[0] 18286 if x0.Op != OpAMD64MOVBloadidx1 { 18287 break 18288 } 18289 i0 := x0.AuxInt 18290 s := x0.Aux 18291 _ = x0.Args[2] 18292 p := x0.Args[0] 18293 idx := x0.Args[1] 18294 mem := x0.Args[2] 18295 y := or.Args[1] 18296 s1 := v.Args[1] 18297 if s1.Op != OpAMD64SHLLconst { 18298 break 18299 } 18300 j1 := s1.AuxInt 18301 x1 := s1.Args[0] 18302 if x1.Op != OpAMD64MOVBloadidx1 { 18303 break 18304 } 18305 i1 := x1.AuxInt 18306 if x1.Aux != s { 18307 break 18308 } 18309 _ = x1.Args[2] 18310 if p != x1.Args[0] { 18311 break 18312 } 18313 if idx != x1.Args[1] { 18314 break 18315 } 18316 if mem != x1.Args[2] { 18317 break 18318 } 18319 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18320 break 18321 } 18322 b = mergePoint(b, x0, x1) 18323 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18324 v.reset(OpCopy) 18325 v.AddArg(v0) 18326 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18327 v1.AuxInt = j0 18328 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18329 v2.AuxInt = i0 18330 v2.Aux = s 18331 v2.AddArg(p) 18332 v2.AddArg(idx) 18333 v2.AddArg(mem) 18334 v1.AddArg(v2) 18335 v0.AddArg(v1) 18336 v0.AddArg(y) 18337 return true 18338 } 18339 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18340 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18341 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18342 for { 18343 _ = v.Args[1] 18344 or := v.Args[0] 18345 if or.Op != OpAMD64ORL 
{ 18346 break 18347 } 18348 _ = or.Args[1] 18349 s0 := or.Args[0] 18350 if s0.Op != OpAMD64SHLLconst { 18351 break 18352 } 18353 j0 := s0.AuxInt 18354 x0 := s0.Args[0] 18355 if x0.Op != OpAMD64MOVBloadidx1 { 18356 break 18357 } 18358 i0 := x0.AuxInt 18359 s := x0.Aux 18360 _ = x0.Args[2] 18361 idx := x0.Args[0] 18362 p := x0.Args[1] 18363 mem := x0.Args[2] 18364 y := or.Args[1] 18365 s1 := v.Args[1] 18366 if s1.Op != OpAMD64SHLLconst { 18367 break 18368 } 18369 j1 := s1.AuxInt 18370 x1 := s1.Args[0] 18371 if x1.Op != OpAMD64MOVBloadidx1 { 18372 break 18373 } 18374 i1 := x1.AuxInt 18375 if x1.Aux != s { 18376 break 18377 } 18378 _ = x1.Args[2] 18379 if p != x1.Args[0] { 18380 break 18381 } 18382 if idx != x1.Args[1] { 18383 break 18384 } 18385 if mem != x1.Args[2] { 18386 break 18387 } 18388 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18389 break 18390 } 18391 b = mergePoint(b, x0, x1) 18392 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18393 v.reset(OpCopy) 18394 v.AddArg(v0) 18395 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18396 v1.AuxInt = j0 18397 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18398 v2.AuxInt = i0 18399 v2.Aux = s 18400 v2.AddArg(p) 18401 v2.AddArg(idx) 18402 v2.AddArg(mem) 18403 v1.AddArg(v2) 18404 v0.AddArg(v1) 18405 v0.AddArg(y) 18406 return true 18407 } 18408 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18409 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18410 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18411 for { 18412 _ = v.Args[1] 18413 or := v.Args[0] 18414 if or.Op != OpAMD64ORL { 18415 break 18416 } 18417 _ = or.Args[1] 18418 y := or.Args[0] 18419 s0 := or.Args[1] 18420 if s0.Op != OpAMD64SHLLconst { 18421 break 18422 } 18423 j0 := s0.AuxInt 18424 x0 := s0.Args[0] 18425 if x0.Op != OpAMD64MOVBloadidx1 { 18426 break 18427 } 18428 i0 := x0.AuxInt 18429 s := x0.Aux 18430 _ = x0.Args[2] 18431 p := x0.Args[0] 18432 idx := x0.Args[1] 18433 mem := x0.Args[2] 18434 s1 := v.Args[1] 18435 if s1.Op != OpAMD64SHLLconst { 18436 break 18437 } 18438 j1 := s1.AuxInt 18439 x1 := s1.Args[0] 18440 if x1.Op != OpAMD64MOVBloadidx1 { 18441 break 18442 } 18443 i1 := x1.AuxInt 18444 if x1.Aux != s { 18445 break 18446 } 18447 _ = x1.Args[2] 18448 if p != x1.Args[0] { 18449 break 18450 } 18451 if idx != x1.Args[1] { 18452 break 18453 } 18454 if mem != x1.Args[2] { 18455 break 18456 } 18457 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18458 break 18459 } 18460 b = mergePoint(b, x0, x1) 18461 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18462 v.reset(OpCopy) 18463 v.AddArg(v0) 18464 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18465 v1.AuxInt = j0 18466 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18467 v2.AuxInt = i0 18468 v2.Aux = s 18469 v2.AddArg(p) 18470 v2.AddArg(idx) 18471 v2.AddArg(mem) 18472 v1.AddArg(v2) 18473 v0.AddArg(v1) 
18474 v0.AddArg(y) 18475 return true 18476 } 18477 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18478 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18479 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18480 for { 18481 _ = v.Args[1] 18482 or := v.Args[0] 18483 if or.Op != OpAMD64ORL { 18484 break 18485 } 18486 _ = or.Args[1] 18487 y := or.Args[0] 18488 s0 := or.Args[1] 18489 if s0.Op != OpAMD64SHLLconst { 18490 break 18491 } 18492 j0 := s0.AuxInt 18493 x0 := s0.Args[0] 18494 if x0.Op != OpAMD64MOVBloadidx1 { 18495 break 18496 } 18497 i0 := x0.AuxInt 18498 s := x0.Aux 18499 _ = x0.Args[2] 18500 idx := x0.Args[0] 18501 p := x0.Args[1] 18502 mem := x0.Args[2] 18503 s1 := v.Args[1] 18504 if s1.Op != OpAMD64SHLLconst { 18505 break 18506 } 18507 j1 := s1.AuxInt 18508 x1 := s1.Args[0] 18509 if x1.Op != OpAMD64MOVBloadidx1 { 18510 break 18511 } 18512 i1 := x1.AuxInt 18513 if x1.Aux != s { 18514 break 18515 } 18516 _ = x1.Args[2] 18517 if p != x1.Args[0] { 18518 break 18519 } 18520 if idx != x1.Args[1] { 18521 break 18522 } 18523 if mem != x1.Args[2] { 18524 break 18525 } 18526 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18527 break 18528 } 18529 b = mergePoint(b, x0, x1) 18530 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18531 v.reset(OpCopy) 18532 v.AddArg(v0) 18533 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18534 v1.AuxInt = j0 18535 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18536 v2.AuxInt = i0 18537 v2.Aux = s 18538 v2.AddArg(p) 18539 v2.AddArg(idx) 18540 v2.AddArg(mem) 18541 v1.AddArg(v2) 18542 v0.AddArg(v1) 18543 v0.AddArg(y) 18544 return true 18545 } 18546 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18547 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18548 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18549 for { 18550 _ = v.Args[1] 18551 or := v.Args[0] 18552 if or.Op != OpAMD64ORL { 18553 break 18554 } 18555 _ = or.Args[1] 18556 s0 := or.Args[0] 18557 if s0.Op != OpAMD64SHLLconst { 18558 break 18559 } 18560 j0 := s0.AuxInt 18561 x0 := s0.Args[0] 18562 if x0.Op != OpAMD64MOVBloadidx1 { 18563 break 18564 } 18565 i0 := x0.AuxInt 18566 s := x0.Aux 18567 _ = x0.Args[2] 18568 p := x0.Args[0] 18569 idx := x0.Args[1] 18570 mem := x0.Args[2] 18571 y := or.Args[1] 18572 s1 := v.Args[1] 18573 if s1.Op != OpAMD64SHLLconst { 18574 break 18575 } 18576 j1 := s1.AuxInt 18577 x1 := s1.Args[0] 18578 if x1.Op != OpAMD64MOVBloadidx1 { 18579 break 18580 } 18581 i1 := x1.AuxInt 18582 if x1.Aux != s { 18583 break 18584 } 18585 _ = x1.Args[2] 18586 if idx != x1.Args[0] { 18587 break 18588 } 18589 if p != x1.Args[1] { 18590 break 18591 } 18592 if mem != x1.Args[2] { 18593 break 18594 } 18595 if !(i1 == i0+1 && j1 == j0+8 
&& j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18596 break 18597 } 18598 b = mergePoint(b, x0, x1) 18599 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18600 v.reset(OpCopy) 18601 v.AddArg(v0) 18602 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18603 v1.AuxInt = j0 18604 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18605 v2.AuxInt = i0 18606 v2.Aux = s 18607 v2.AddArg(p) 18608 v2.AddArg(idx) 18609 v2.AddArg(mem) 18610 v1.AddArg(v2) 18611 v0.AddArg(v1) 18612 v0.AddArg(y) 18613 return true 18614 } 18615 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18616 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18617 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18618 for { 18619 _ = v.Args[1] 18620 or := v.Args[0] 18621 if or.Op != OpAMD64ORL { 18622 break 18623 } 18624 _ = or.Args[1] 18625 s0 := or.Args[0] 18626 if s0.Op != OpAMD64SHLLconst { 18627 break 18628 } 18629 j0 := s0.AuxInt 18630 x0 := s0.Args[0] 18631 if x0.Op != OpAMD64MOVBloadidx1 { 18632 break 18633 } 18634 i0 := x0.AuxInt 18635 s := x0.Aux 18636 _ = x0.Args[2] 18637 idx := x0.Args[0] 18638 p := x0.Args[1] 18639 mem := x0.Args[2] 18640 y := or.Args[1] 18641 s1 := v.Args[1] 18642 if s1.Op != OpAMD64SHLLconst { 18643 break 18644 } 18645 j1 := s1.AuxInt 18646 x1 := s1.Args[0] 18647 if x1.Op != OpAMD64MOVBloadidx1 { 18648 break 18649 } 18650 i1 := x1.AuxInt 18651 if x1.Aux != s { 18652 break 18653 } 18654 _ = x1.Args[2] 18655 if idx != x1.Args[0] { 18656 break 18657 } 18658 if p != x1.Args[1] { 18659 break 18660 } 18661 if mem != x1.Args[2] { 18662 break 18663 } 18664 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18665 break 18666 } 18667 b = mergePoint(b, x0, x1) 18668 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18669 v.reset(OpCopy) 18670 v.AddArg(v0) 18671 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18672 v1.AuxInt = j0 18673 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18674 v2.AuxInt = i0 18675 v2.Aux = s 18676 v2.AddArg(p) 18677 v2.AddArg(idx) 18678 v2.AddArg(mem) 18679 v1.AddArg(v2) 18680 v0.AddArg(v1) 18681 v0.AddArg(y) 18682 return true 18683 } 18684 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18685 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18686 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18687 for { 18688 _ = v.Args[1] 18689 or := v.Args[0] 18690 if or.Op != OpAMD64ORL { 18691 break 18692 } 18693 _ = or.Args[1] 18694 y := or.Args[0] 18695 s0 := or.Args[1] 18696 if s0.Op != OpAMD64SHLLconst { 18697 break 18698 } 18699 j0 := s0.AuxInt 18700 x0 := s0.Args[0] 18701 if 
x0.Op != OpAMD64MOVBloadidx1 { 18702 break 18703 } 18704 i0 := x0.AuxInt 18705 s := x0.Aux 18706 _ = x0.Args[2] 18707 p := x0.Args[0] 18708 idx := x0.Args[1] 18709 mem := x0.Args[2] 18710 s1 := v.Args[1] 18711 if s1.Op != OpAMD64SHLLconst { 18712 break 18713 } 18714 j1 := s1.AuxInt 18715 x1 := s1.Args[0] 18716 if x1.Op != OpAMD64MOVBloadidx1 { 18717 break 18718 } 18719 i1 := x1.AuxInt 18720 if x1.Aux != s { 18721 break 18722 } 18723 _ = x1.Args[2] 18724 if idx != x1.Args[0] { 18725 break 18726 } 18727 if p != x1.Args[1] { 18728 break 18729 } 18730 if mem != x1.Args[2] { 18731 break 18732 } 18733 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18734 break 18735 } 18736 b = mergePoint(b, x0, x1) 18737 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18738 v.reset(OpCopy) 18739 v.AddArg(v0) 18740 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18741 v1.AuxInt = j0 18742 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18743 v2.AuxInt = i0 18744 v2.Aux = s 18745 v2.AddArg(p) 18746 v2.AddArg(idx) 18747 v2.AddArg(mem) 18748 v1.AddArg(v2) 18749 v0.AddArg(v1) 18750 v0.AddArg(y) 18751 return true 18752 } 18753 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18754 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18755 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 18756 for { 18757 _ = v.Args[1] 18758 or := v.Args[0] 18759 if or.Op != OpAMD64ORL { 18760 break 18761 } 18762 _ = or.Args[1] 18763 y := or.Args[0] 18764 s0 := or.Args[1] 18765 if s0.Op != OpAMD64SHLLconst { 18766 break 18767 } 18768 j0 := s0.AuxInt 18769 x0 := s0.Args[0] 18770 if x0.Op != OpAMD64MOVBloadidx1 { 18771 break 18772 } 18773 i0 := x0.AuxInt 18774 s := x0.Aux 18775 _ = x0.Args[2] 18776 idx := x0.Args[0] 18777 p := x0.Args[1] 18778 mem := x0.Args[2] 18779 s1 := v.Args[1] 18780 if s1.Op != OpAMD64SHLLconst { 18781 break 18782 } 18783 j1 := s1.AuxInt 18784 x1 := s1.Args[0] 18785 if x1.Op != OpAMD64MOVBloadidx1 { 18786 break 18787 } 18788 i1 := x1.AuxInt 18789 if x1.Aux != s { 18790 break 18791 } 18792 _ = x1.Args[2] 18793 if idx != x1.Args[0] { 18794 break 18795 } 18796 if p != x1.Args[1] { 18797 break 18798 } 18799 if mem != x1.Args[2] { 18800 break 18801 } 18802 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18803 break 18804 } 18805 b = mergePoint(b, x0, x1) 18806 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18807 v.reset(OpCopy) 18808 v.AddArg(v0) 18809 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18810 v1.AuxInt = j0 18811 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 18812 v2.AuxInt = i0 18813 v2.Aux = s 18814 v2.AddArg(p) 18815 v2.AddArg(idx) 18816 v2.AddArg(mem) 18817 v1.AddArg(v2) 18818 v0.AddArg(v1) 18819 v0.AddArg(y) 18820 return true 18821 } 18822 // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) 18823 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 
1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18824 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 18825 for { 18826 _ = v.Args[1] 18827 x1 := v.Args[0] 18828 if x1.Op != OpAMD64MOVBload { 18829 break 18830 } 18831 i1 := x1.AuxInt 18832 s := x1.Aux 18833 _ = x1.Args[1] 18834 p := x1.Args[0] 18835 mem := x1.Args[1] 18836 sh := v.Args[1] 18837 if sh.Op != OpAMD64SHLLconst { 18838 break 18839 } 18840 if sh.AuxInt != 8 { 18841 break 18842 } 18843 x0 := sh.Args[0] 18844 if x0.Op != OpAMD64MOVBload { 18845 break 18846 } 18847 i0 := x0.AuxInt 18848 if x0.Aux != s { 18849 break 18850 } 18851 _ = x0.Args[1] 18852 if p != x0.Args[0] { 18853 break 18854 } 18855 if mem != x0.Args[1] { 18856 break 18857 } 18858 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18859 break 18860 } 18861 b = mergePoint(b, x0, x1) 18862 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 18863 v.reset(OpCopy) 18864 v.AddArg(v0) 18865 v0.AuxInt = 8 18866 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18867 v1.AuxInt = i0 18868 v1.Aux = s 18869 v1.AddArg(p) 18870 v1.AddArg(mem) 18871 v0.AddArg(v1) 18872 return true 18873 } 18874 return false 18875 } 18876 func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { 18877 b := v.Block 18878 _ = b 18879 typ := &b.Func.Config.Types 18880 _ = typ 18881 // match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 18882 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18883 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 18884 for { 18885 _ = v.Args[1] 18886 sh := v.Args[0] 18887 if sh.Op != OpAMD64SHLLconst { 18888 break 18889 } 18890 if sh.AuxInt != 8 { 18891 break 18892 } 18893 x0 := sh.Args[0] 18894 if x0.Op != OpAMD64MOVBload { 18895 break 18896 } 18897 i0 := x0.AuxInt 18898 s := x0.Aux 18899 _ = x0.Args[1] 18900 p := x0.Args[0] 18901 mem := x0.Args[1] 18902 x1 := v.Args[1] 18903 if x1.Op != OpAMD64MOVBload { 18904 break 18905 } 18906 i1 := x1.AuxInt 18907 if x1.Aux != s { 18908 break 18909 } 18910 _ = x1.Args[1] 18911 if p != x1.Args[0] { 18912 break 18913 } 18914 if mem != x1.Args[1] { 18915 break 18916 } 18917 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18918 break 18919 } 18920 b = mergePoint(b, x0, x1) 18921 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 18922 v.reset(OpCopy) 18923 v.AddArg(v0) 18924 v0.AuxInt = 8 18925 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18926 v1.AuxInt = i0 18927 v1.Aux = s 18928 v1.AddArg(p) 18929 v1.AddArg(mem) 18930 v0.AddArg(v1) 18931 return true 18932 } 18933 // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 18934 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 18935 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 18936 for { 18937 _ = v.Args[1] 18938 r1 := v.Args[0] 18939 if r1.Op != OpAMD64ROLWconst { 18940 break 18941 } 18942 if r1.AuxInt != 8 { 18943 break 18944 } 18945 x1 := r1.Args[0] 18946 if x1.Op != OpAMD64MOVWload { 18947 
break 18948 } 18949 i1 := x1.AuxInt 18950 s := x1.Aux 18951 _ = x1.Args[1] 18952 p := x1.Args[0] 18953 mem := x1.Args[1] 18954 sh := v.Args[1] 18955 if sh.Op != OpAMD64SHLLconst { 18956 break 18957 } 18958 if sh.AuxInt != 16 { 18959 break 18960 } 18961 r0 := sh.Args[0] 18962 if r0.Op != OpAMD64ROLWconst { 18963 break 18964 } 18965 if r0.AuxInt != 8 { 18966 break 18967 } 18968 x0 := r0.Args[0] 18969 if x0.Op != OpAMD64MOVWload { 18970 break 18971 } 18972 i0 := x0.AuxInt 18973 if x0.Aux != s { 18974 break 18975 } 18976 _ = x0.Args[1] 18977 if p != x0.Args[0] { 18978 break 18979 } 18980 if mem != x0.Args[1] { 18981 break 18982 } 18983 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 18984 break 18985 } 18986 b = mergePoint(b, x0, x1) 18987 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 18988 v.reset(OpCopy) 18989 v.AddArg(v0) 18990 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 18991 v1.AuxInt = i0 18992 v1.Aux = s 18993 v1.AddArg(p) 18994 v1.AddArg(mem) 18995 v0.AddArg(v1) 18996 return true 18997 } 18998 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 18999 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 19000 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 19001 for { 19002 _ = v.Args[1] 19003 sh := v.Args[0] 19004 if sh.Op != OpAMD64SHLLconst { 19005 break 19006 } 19007 if sh.AuxInt != 16 { 19008 break 19009 } 19010 r0 := sh.Args[0] 19011 if r0.Op != OpAMD64ROLWconst { 19012 break 19013 } 19014 if r0.AuxInt != 8 { 19015 break 19016 } 19017 x0 := r0.Args[0] 19018 if x0.Op != OpAMD64MOVWload { 19019 break 19020 } 19021 i0 := x0.AuxInt 19022 s := x0.Aux 19023 _ = x0.Args[1] 19024 p := x0.Args[0] 19025 mem := x0.Args[1] 19026 r1 := v.Args[1] 19027 if r1.Op != OpAMD64ROLWconst { 19028 break 19029 } 19030 if r1.AuxInt != 8 { 19031 break 19032 } 19033 x1 := r1.Args[0] 19034 if x1.Op != OpAMD64MOVWload { 19035 break 19036 } 19037 i1 := x1.AuxInt 19038 if x1.Aux != s { 19039 break 19040 } 19041 _ = x1.Args[1] 19042 if p != x1.Args[0] { 19043 break 19044 } 19045 if mem != x1.Args[1] { 19046 break 19047 } 19048 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 19049 break 19050 } 19051 b = mergePoint(b, x0, x1) 19052 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 19053 v.reset(OpCopy) 19054 v.AddArg(v0) 19055 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 19056 v1.AuxInt = i0 19057 v1.Aux = s 19058 v1.AddArg(p) 19059 v1.AddArg(mem) 19060 v0.AddArg(v1) 19061 return true 19062 } 19063 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 19064 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19065 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 19066 for { 19067 _ = v.Args[1] 19068 
s0 := v.Args[0] 19069 if s0.Op != OpAMD64SHLLconst { 19070 break 19071 } 19072 j0 := s0.AuxInt 19073 x0 := s0.Args[0] 19074 if x0.Op != OpAMD64MOVBload { 19075 break 19076 } 19077 i0 := x0.AuxInt 19078 s := x0.Aux 19079 _ = x0.Args[1] 19080 p := x0.Args[0] 19081 mem := x0.Args[1] 19082 or := v.Args[1] 19083 if or.Op != OpAMD64ORL { 19084 break 19085 } 19086 _ = or.Args[1] 19087 s1 := or.Args[0] 19088 if s1.Op != OpAMD64SHLLconst { 19089 break 19090 } 19091 j1 := s1.AuxInt 19092 x1 := s1.Args[0] 19093 if x1.Op != OpAMD64MOVBload { 19094 break 19095 } 19096 i1 := x1.AuxInt 19097 if x1.Aux != s { 19098 break 19099 } 19100 _ = x1.Args[1] 19101 if p != x1.Args[0] { 19102 break 19103 } 19104 if mem != x1.Args[1] { 19105 break 19106 } 19107 y := or.Args[1] 19108 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19109 break 19110 } 19111 b = mergePoint(b, x0, x1) 19112 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19113 v.reset(OpCopy) 19114 v.AddArg(v0) 19115 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19116 v1.AuxInt = j1 19117 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 19118 v2.AuxInt = 8 19119 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 19120 v3.AuxInt = i0 19121 v3.Aux = s 19122 v3.AddArg(p) 19123 v3.AddArg(mem) 19124 v2.AddArg(v3) 19125 v1.AddArg(v2) 19126 v0.AddArg(v1) 19127 v0.AddArg(y) 19128 return true 19129 } 19130 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))) 19131 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19132 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 19133 for { 19134 _ = v.Args[1] 19135 s0 := v.Args[0] 19136 if s0.Op != OpAMD64SHLLconst { 19137 break 19138 } 19139 j0 := s0.AuxInt 19140 x0 := s0.Args[0] 19141 if x0.Op != OpAMD64MOVBload { 19142 break 19143 } 19144 i0 := x0.AuxInt 19145 s := x0.Aux 19146 _ = x0.Args[1] 19147 p := x0.Args[0] 19148 mem := x0.Args[1] 19149 or := v.Args[1] 19150 if or.Op != OpAMD64ORL { 19151 break 19152 } 19153 _ = or.Args[1] 19154 y := or.Args[0] 19155 s1 := or.Args[1] 19156 if s1.Op != OpAMD64SHLLconst { 19157 break 19158 } 19159 j1 := s1.AuxInt 19160 x1 := s1.Args[0] 19161 if x1.Op != OpAMD64MOVBload { 19162 break 19163 } 19164 i1 := x1.AuxInt 19165 if x1.Aux != s { 19166 break 19167 } 19168 _ = x1.Args[1] 19169 if p != x1.Args[0] { 19170 break 19171 } 19172 if mem != x1.Args[1] { 19173 break 19174 } 19175 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19176 break 19177 } 19178 b = mergePoint(b, x0, x1) 19179 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19180 v.reset(OpCopy) 19181 v.AddArg(v0) 19182 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19183 v1.AuxInt = j1 19184 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 19185 v2.AuxInt = 8 19186 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 19187 v3.AuxInt = i0 19188 v3.Aux = s 19189 v3.AddArg(p) 19190 v3.AddArg(mem) 19191 v2.AddArg(v3) 
19192 v1.AddArg(v2) 19193 v0.AddArg(v1) 19194 v0.AddArg(y) 19195 return true 19196 } 19197 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) 19198 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19199 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 19200 for { 19201 _ = v.Args[1] 19202 or := v.Args[0] 19203 if or.Op != OpAMD64ORL { 19204 break 19205 } 19206 _ = or.Args[1] 19207 s1 := or.Args[0] 19208 if s1.Op != OpAMD64SHLLconst { 19209 break 19210 } 19211 j1 := s1.AuxInt 19212 x1 := s1.Args[0] 19213 if x1.Op != OpAMD64MOVBload { 19214 break 19215 } 19216 i1 := x1.AuxInt 19217 s := x1.Aux 19218 _ = x1.Args[1] 19219 p := x1.Args[0] 19220 mem := x1.Args[1] 19221 y := or.Args[1] 19222 s0 := v.Args[1] 19223 if s0.Op != OpAMD64SHLLconst { 19224 break 19225 } 19226 j0 := s0.AuxInt 19227 x0 := s0.Args[0] 19228 if x0.Op != OpAMD64MOVBload { 19229 break 19230 } 19231 i0 := x0.AuxInt 19232 if x0.Aux != s { 19233 break 19234 } 19235 _ = x0.Args[1] 19236 if p != x0.Args[0] { 19237 break 19238 } 19239 if mem != x0.Args[1] { 19240 break 19241 } 19242 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19243 break 19244 } 19245 b = mergePoint(b, x0, x1) 19246 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19247 v.reset(OpCopy) 19248 v.AddArg(v0) 19249 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19250 v1.AuxInt = j1 19251 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 19252 v2.AuxInt = 8 19253 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 19254 v3.AuxInt = i0 19255 v3.Aux = s 19256 v3.AddArg(p) 19257 v3.AddArg(mem) 19258 v2.AddArg(v3) 19259 v1.AddArg(v2) 19260 v0.AddArg(v1) 19261 v0.AddArg(y) 19262 return true 19263 } 19264 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) 19265 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19266 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 19267 for { 19268 _ = v.Args[1] 19269 or := v.Args[0] 19270 if or.Op != OpAMD64ORL { 19271 break 19272 } 19273 _ = or.Args[1] 19274 y := or.Args[0] 19275 s1 := or.Args[1] 19276 if s1.Op != OpAMD64SHLLconst { 19277 break 19278 } 19279 j1 := s1.AuxInt 19280 x1 := s1.Args[0] 19281 if x1.Op != OpAMD64MOVBload { 19282 break 19283 } 19284 i1 := x1.AuxInt 19285 s := x1.Aux 19286 _ = x1.Args[1] 19287 p := x1.Args[0] 19288 mem := x1.Args[1] 19289 s0 := v.Args[1] 19290 if s0.Op != OpAMD64SHLLconst { 19291 break 19292 } 19293 j0 := s0.AuxInt 19294 x0 := s0.Args[0] 19295 if x0.Op != OpAMD64MOVBload { 19296 break 19297 } 19298 i0 := x0.AuxInt 19299 if x0.Aux != s { 19300 break 19301 } 19302 _ = x0.Args[1] 19303 if p != x0.Args[0] { 19304 break 19305 } 19306 if mem != x0.Args[1] { 19307 break 19308 } 19309 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19310 break 19311 } 19312 b = mergePoint(b, x0, x1) 19313 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19314 v.reset(OpCopy) 19315 v.AddArg(v0) 19316 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19317 v1.AuxInt = j1 19318 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 19319 v2.AuxInt = 8 19320 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 19321 v3.AuxInt = i0 19322 v3.Aux = s 19323 v3.AddArg(p) 19324 v3.AddArg(mem) 19325 v2.AddArg(v3) 19326 v1.AddArg(v2) 19327 v0.AddArg(v1) 19328 v0.AddArg(y) 19329 return true 19330 } 19331 // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 19332 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19333 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 19334 for { 19335 _ = v.Args[1] 19336 x1 := v.Args[0] 19337 if x1.Op != OpAMD64MOVBloadidx1 { 19338 break 19339 } 19340 i1 := x1.AuxInt 19341 s := x1.Aux 19342 _ = x1.Args[2] 19343 p := x1.Args[0] 19344 idx := x1.Args[1] 19345 mem := x1.Args[2] 19346 sh := v.Args[1] 19347 if sh.Op != OpAMD64SHLLconst { 19348 break 19349 } 19350 if sh.AuxInt != 8 { 19351 break 19352 } 19353 x0 := sh.Args[0] 19354 if x0.Op != OpAMD64MOVBloadidx1 { 19355 break 19356 } 19357 i0 := x0.AuxInt 19358 if x0.Aux != s { 19359 break 19360 } 19361 _ = x0.Args[2] 19362 if p != x0.Args[0] { 19363 break 19364 } 19365 if idx != x0.Args[1] { 19366 break 19367 } 19368 if mem != x0.Args[2] { 19369 break 19370 } 19371 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19372 break 19373 } 19374 b = mergePoint(b, x0, x1) 19375 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 19376 v.reset(OpCopy) 19377 v.AddArg(v0) 19378 v0.AuxInt = 8 19379 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19380 v1.AuxInt = i0 19381 v1.Aux = s 19382 v1.AddArg(p) 19383 v1.AddArg(idx) 19384 v1.AddArg(mem) 19385 v0.AddArg(v1) 19386 return true 19387 } 19388 // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 19389 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19390 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 19391 for { 19392 _ = v.Args[1] 19393 x1 := v.Args[0] 19394 if x1.Op != OpAMD64MOVBloadidx1 { 19395 break 19396 } 19397 i1 := x1.AuxInt 19398 s := x1.Aux 19399 _ = x1.Args[2] 19400 idx := x1.Args[0] 19401 p := x1.Args[1] 19402 mem := x1.Args[2] 19403 sh := v.Args[1] 19404 if sh.Op != OpAMD64SHLLconst { 19405 break 19406 } 19407 if sh.AuxInt != 8 { 19408 break 19409 } 19410 x0 := sh.Args[0] 19411 if x0.Op != OpAMD64MOVBloadidx1 { 19412 break 19413 } 19414 i0 := x0.AuxInt 19415 if x0.Aux != s { 19416 break 19417 } 19418 _ = x0.Args[2] 19419 if p != x0.Args[0] { 19420 break 19421 } 19422 if idx != x0.Args[1] { 19423 break 19424 } 19425 if mem != x0.Args[2] { 19426 break 19427 } 19428 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19429 break 19430 } 19431 b = mergePoint(b, x0, x1) 19432 v0 := 
b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 19433 v.reset(OpCopy) 19434 v.AddArg(v0) 19435 v0.AuxInt = 8 19436 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19437 v1.AuxInt = i0 19438 v1.Aux = s 19439 v1.AddArg(p) 19440 v1.AddArg(idx) 19441 v1.AddArg(mem) 19442 v0.AddArg(v1) 19443 return true 19444 } 19445 // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 19446 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19447 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 19448 for { 19449 _ = v.Args[1] 19450 x1 := v.Args[0] 19451 if x1.Op != OpAMD64MOVBloadidx1 { 19452 break 19453 } 19454 i1 := x1.AuxInt 19455 s := x1.Aux 19456 _ = x1.Args[2] 19457 p := x1.Args[0] 19458 idx := x1.Args[1] 19459 mem := x1.Args[2] 19460 sh := v.Args[1] 19461 if sh.Op != OpAMD64SHLLconst { 19462 break 19463 } 19464 if sh.AuxInt != 8 { 19465 break 19466 } 19467 x0 := sh.Args[0] 19468 if x0.Op != OpAMD64MOVBloadidx1 { 19469 break 19470 } 19471 i0 := x0.AuxInt 19472 if x0.Aux != s { 19473 break 19474 } 19475 _ = x0.Args[2] 19476 if idx != x0.Args[0] { 19477 break 19478 } 19479 if p != x0.Args[1] { 19480 break 19481 } 19482 if mem != x0.Args[2] { 19483 break 19484 } 19485 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19486 break 19487 } 19488 b = mergePoint(b, x0, x1) 19489 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 19490 v.reset(OpCopy) 19491 v.AddArg(v0) 19492 v0.AuxInt = 8 19493 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19494 v1.AuxInt = i0 19495 v1.Aux = s 19496 v1.AddArg(p) 19497 v1.AddArg(idx) 19498 v1.AddArg(mem) 19499 v0.AddArg(v1) 19500 return true 19501 } 19502 return false 19503 } 19504 func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool { 19505 b := v.Block 19506 _ = b 19507 typ := &b.Func.Config.Types 19508 _ = typ 19509 // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 19510 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19511 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 19512 for { 19513 _ = v.Args[1] 19514 x1 := v.Args[0] 19515 if x1.Op != OpAMD64MOVBloadidx1 { 19516 break 19517 } 19518 i1 := x1.AuxInt 19519 s := x1.Aux 19520 _ = x1.Args[2] 19521 idx := x1.Args[0] 19522 p := x1.Args[1] 19523 mem := x1.Args[2] 19524 sh := v.Args[1] 19525 if sh.Op != OpAMD64SHLLconst { 19526 break 19527 } 19528 if sh.AuxInt != 8 { 19529 break 19530 } 19531 x0 := sh.Args[0] 19532 if x0.Op != OpAMD64MOVBloadidx1 { 19533 break 19534 } 19535 i0 := x0.AuxInt 19536 if x0.Aux != s { 19537 break 19538 } 19539 _ = x0.Args[2] 19540 if idx != x0.Args[0] { 19541 break 19542 } 19543 if p != x0.Args[1] { 19544 break 19545 } 19546 if mem != x0.Args[2] { 19547 break 19548 } 19549 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19550 break 19551 } 19552 b = mergePoint(b, x0, x1) 19553 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 19554 v.reset(OpCopy) 19555 v.AddArg(v0) 19556 v0.AuxInt = 8 19557 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19558 v1.AuxInt = i0 19559 v1.Aux = s 19560 v1.AddArg(p) 19561 
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
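	// The rules below are the analogous 32-bit case: two byte-swapped 16-bit
	// loads at offsets i0 and i0+2, with the lower half shifted into the top
	// 16 bits, combine into a single byte-swapped 32-bit load
	// (BSWAPL of MOVLloadidx1).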
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
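	// The rules below handle the same 16-bit byte merge when the two shifted
	// byte loads sit inside a larger ORL chain: the pair is replaced by one
	// shifted, byte-swapped 16-bit load ORed with the remaining operand y.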
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 :=
or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos,
OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
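	// The rules below fold a 32-bit load feeding an ORL directly into the
	// instruction (ORLmem, i.e. OR from memory), when canMergeLoad reports
	// the load can be merged and it has no other uses.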
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
	// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
	// match: (ORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: int32(c)==-1
	// result: (MOVLconst [-1])
	for {
		c := v.AuxInt
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
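// The ORQ rules fold constants that fit in 32 bits into ORQconst and
// recognize constant rotates (a SHLQconst/SHRQconst pair whose shift
// amounts sum to 64) as ROLQconst.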
func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
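// The _10 chunk finishes the variable-rotate recognition: first the remaining
// operand orders for ROLQ when the shift count is 32-bit (NEGL/CMPLconst in
// place of NEGQ/CMPQconst), then the mirrored SHRQ-rooted trees that become
// RORQ, a rotate right. ORQ and ANDQ are commutative, so one source-level
// rotate can reach this pass with its operands in either order; the generator
// enumerates each order as a separate rule.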
func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
	// match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
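// The _20 chunk begins the load-combining rules. AMD64 is little-endian, so
// ORing a byte load at offset i0 with the byte load at i0+1 shifted left by 8
// reads the same bits as a single 16-bit load at i0; the same holds at 16- and
// 32-bit granularity. A rule fires only when every partial load and shift has
// a single use (nothing else observes the intermediates) and when
// mergePoint(b, x0, x1) finds a block where the combined load can be placed;
// clobber marks the replaced values as dead. For example, the standard
// little-endian decode
//
//	u := uint16(buf[0]) | uint16(buf[1])<<8
//
// is what these rules turn into one MOVWload.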
func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
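// The shifted-pair rules continue in _30. Here j0 and j1 track how far each
// partial load has already been shifted, and the cond j1 == j0+8 && j0 % 16 == 0
// (or the 16-bit analogue) ensures the two halves land adjacent and aligned
// within the wider value, so the pair is replaced by one wider load shifted by
// j0. The result is built at mergePoint rather than in place because the two
// loads may start out in different blocks.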
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
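	// MOVBloadidx1 addresses p+idx, so its two address operands are
	// interchangeable. The generator therefore spells out every p/idx order
	// for x0 and x1 separately, which is why the next several rules differ
	// only in which argument slot holds p and which holds idx.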
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
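// The _40 chunk carries on with the indexed forms: the remaining operand
// orders for byte pairs merging into MOVWloadidx1, then MOVWloadidx1 pairs at
// offset stride 2 merging into MOVLloadidx1.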
sh.Op != OpAMD64SHLQconst { 23992 break 23993 } 23994 if sh.AuxInt != 8 { 23995 break 23996 } 23997 x1 := sh.Args[0] 23998 if x1.Op != OpAMD64MOVBloadidx1 { 23999 break 24000 } 24001 i1 := x1.AuxInt 24002 s := x1.Aux 24003 _ = x1.Args[2] 24004 p := x1.Args[0] 24005 idx := x1.Args[1] 24006 mem := x1.Args[2] 24007 x0 := v.Args[1] 24008 if x0.Op != OpAMD64MOVBloadidx1 { 24009 break 24010 } 24011 i0 := x0.AuxInt 24012 if x0.Aux != s { 24013 break 24014 } 24015 _ = x0.Args[2] 24016 if idx != x0.Args[0] { 24017 break 24018 } 24019 if p != x0.Args[1] { 24020 break 24021 } 24022 if mem != x0.Args[2] { 24023 break 24024 } 24025 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24026 break 24027 } 24028 b = mergePoint(b, x0, x1) 24029 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 24030 v.reset(OpCopy) 24031 v.AddArg(v0) 24032 v0.AuxInt = i0 24033 v0.Aux = s 24034 v0.AddArg(p) 24035 v0.AddArg(idx) 24036 v0.AddArg(mem) 24037 return true 24038 } 24039 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 24040 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24041 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 24042 for { 24043 _ = v.Args[1] 24044 sh := v.Args[0] 24045 if sh.Op != OpAMD64SHLQconst { 24046 break 24047 } 24048 if sh.AuxInt != 8 { 24049 break 24050 } 24051 x1 := sh.Args[0] 24052 if x1.Op != OpAMD64MOVBloadidx1 { 24053 break 24054 } 24055 i1 := x1.AuxInt 24056 s := x1.Aux 24057 _ = x1.Args[2] 24058 idx := x1.Args[0] 24059 p := x1.Args[1] 24060 mem := x1.Args[2] 24061 x0 := v.Args[1] 24062 if x0.Op != OpAMD64MOVBloadidx1 { 24063 break 24064 } 24065 i0 := x0.AuxInt 24066 if x0.Aux != s { 24067 break 24068 } 24069 _ = x0.Args[2] 24070 if idx != x0.Args[0] { 24071 break 24072 } 24073 if p != x0.Args[1] { 24074 break 24075 } 24076 if mem != x0.Args[2] { 24077 break 24078 } 24079 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24080 break 24081 } 24082 b = mergePoint(b, x0, x1) 24083 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 24084 v.reset(OpCopy) 24085 v.AddArg(v0) 24086 v0.AuxInt = i0 24087 v0.Aux = s 24088 v0.AddArg(p) 24089 v0.AddArg(idx) 24090 v0.AddArg(mem) 24091 return true 24092 } 24093 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 24094 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24095 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 24096 for { 24097 _ = v.Args[1] 24098 x0 := v.Args[0] 24099 if x0.Op != OpAMD64MOVWloadidx1 { 24100 break 24101 } 24102 i0 := x0.AuxInt 24103 s := x0.Aux 24104 _ = x0.Args[2] 24105 p := x0.Args[0] 24106 idx := x0.Args[1] 24107 mem := x0.Args[2] 24108 sh := v.Args[1] 24109 if sh.Op != OpAMD64SHLQconst { 24110 break 24111 } 24112 if sh.AuxInt != 16 { 24113 break 24114 } 24115 x1 := sh.Args[0] 24116 if x1.Op != OpAMD64MOVWloadidx1 { 24117 break 24118 } 24119 i1 := x1.AuxInt 24120 if x1.Aux != s { 24121 break 24122 } 24123 _ = x1.Args[2] 24124 if p != x1.Args[0] { 24125 break 24126 } 24127 if idx != x1.Args[1] { 24128 break 24129 } 24130 if mem != x1.Args[2] { 24131 break 24132 } 24133 if !(i1 == i0+2 && 
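	// Editor's note (annotation, not generator output): the MOVBloadidx1
	// rules above fuse a little-endian pair of adjacent one-byte indexed
	// loads, combined via ORQ and SHLQconst [8], into a single
	// MOVWloadidx1. A minimal sketch of the source shape they target
	// (load16 is a hypothetical name, not part of this file):
	//
	//	func load16(b []byte, i int) uint16 {
	//		return uint16(b[i]) | uint16(b[i+1])<<8
	//	}
	//
	// The near-duplicate variants exist because the generator expands the
	// commutative ORQ arguments and the interchangeable p/idx operands of
	// MOVBloadidx1 into every argument order.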
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
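	// Editor's note: the MOVWloadidx1 rules above repeat the byte-pair
	// fusion one level up: two adjacent little-endian 16-bit loads joined
	// by ORQ and SHLQconst [16] become one MOVLloadidx1 (hence i1 == i0+2
	// and the typ.UInt32 result type). Roughly, assuming the hypothetical
	// load16 helper sketched earlier:
	//
	//	u32 := uint32(load16(b, i)) | uint32(load16(b, i+2))<<16
	//
	// The x0.Uses/x1.Uses/sh.Uses == 1 conditions ensure no other value
	// still needs the partial loads, so clobbering them is safe.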
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
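	// Editor's note: the MOVLloadidx1 rules above complete the ladder: two
	// adjacent 32-bit loads joined by ORQ and SHLQconst [32] collapse into
	// one MOVQloadidx1 (i1 == i0+4, typ.UInt64), i.e. roughly
	//
	//	u64 := uint64(load32(b, i)) | uint64(load32(b, i+4))<<32
	//
	// with load32 a hypothetical helper. mergePoint(b, x0, x1) picks a
	// block in which both original loads are available to host the merged
	// load; a nil result means no such block exists and the rewrite must
	// not fire.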
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
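// Editor's note: rewriteValueAMD64_OpAMD64ORQ_70 continues the shifted
// variants begun above. Here the two adjacent byte loads sit inside a larger
// OR tree, each behind its own SHLQconst (j1 == j0+8, j0 % 16 == 0), so the
// pair is folded into one shifted MOVWloadidx1 while the unrelated operand y
// is preserved. Roughly (le16 being a hypothetical little-endian helper):
//
//	v |= uint64(b[i])<<j | uint64(b[i+1])<<(j+8)   =>   v |= uint64(le16(b, i))<<j
//
// The generator appears to split the long ORQ rule list into the _0, _10,
// ..., _70 helper functions, roughly ten rules per function, to keep each
// generated function at a manageable size.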
func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
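		// (Editor's note: from the next rule group onward the same
		// shifted merge is applied at word granularity: two 16-bit loads
		// at i0 and i0+2, shifted by j0 and j0+16 with j0 % 32 == 0, fuse
		// into one shifted MOVLloadidx1 inside the surrounding OR tree,
		// e.g., with le16/le32 hypothetical little-endian helpers:
		//
		//	v |= uint64(le16(b, i))<<j | uint64(le16(b, i+2))<<(j+16)
		//	// becomes
		//	v |= uint64(le32(b, i))<<j
		// )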
26091 j1 := s1.AuxInt 26092 x1 := s1.Args[0] 26093 if x1.Op != OpAMD64MOVWloadidx1 { 26094 break 26095 } 26096 i1 := x1.AuxInt 26097 s := x1.Aux 26098 _ = x1.Args[2] 26099 p := x1.Args[0] 26100 idx := x1.Args[1] 26101 mem := x1.Args[2] 26102 or := v.Args[1] 26103 if or.Op != OpAMD64ORQ { 26104 break 26105 } 26106 _ = or.Args[1] 26107 s0 := or.Args[0] 26108 if s0.Op != OpAMD64SHLQconst { 26109 break 26110 } 26111 j0 := s0.AuxInt 26112 x0 := s0.Args[0] 26113 if x0.Op != OpAMD64MOVWloadidx1 { 26114 break 26115 } 26116 i0 := x0.AuxInt 26117 if x0.Aux != s { 26118 break 26119 } 26120 _ = x0.Args[2] 26121 if p != x0.Args[0] { 26122 break 26123 } 26124 if idx != x0.Args[1] { 26125 break 26126 } 26127 if mem != x0.Args[2] { 26128 break 26129 } 26130 y := or.Args[1] 26131 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26132 break 26133 } 26134 b = mergePoint(b, x0, x1) 26135 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26136 v.reset(OpCopy) 26137 v.AddArg(v0) 26138 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26139 v1.AuxInt = j0 26140 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26141 v2.AuxInt = i0 26142 v2.Aux = s 26143 v2.AddArg(p) 26144 v2.AddArg(idx) 26145 v2.AddArg(mem) 26146 v1.AddArg(v2) 26147 v0.AddArg(v1) 26148 v0.AddArg(y) 26149 return true 26150 } 26151 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 26152 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26153 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26154 for { 26155 _ = v.Args[1] 26156 s1 := v.Args[0] 26157 if s1.Op != OpAMD64SHLQconst { 26158 break 26159 } 26160 j1 := s1.AuxInt 26161 x1 := s1.Args[0] 26162 if x1.Op != OpAMD64MOVWloadidx1 { 26163 break 26164 } 26165 i1 := x1.AuxInt 26166 s := x1.Aux 26167 _ = x1.Args[2] 26168 idx := x1.Args[0] 26169 p := x1.Args[1] 26170 mem := x1.Args[2] 26171 or := v.Args[1] 26172 if or.Op != OpAMD64ORQ { 26173 break 26174 } 26175 _ = or.Args[1] 26176 s0 := or.Args[0] 26177 if s0.Op != OpAMD64SHLQconst { 26178 break 26179 } 26180 j0 := s0.AuxInt 26181 x0 := s0.Args[0] 26182 if x0.Op != OpAMD64MOVWloadidx1 { 26183 break 26184 } 26185 i0 := x0.AuxInt 26186 if x0.Aux != s { 26187 break 26188 } 26189 _ = x0.Args[2] 26190 if p != x0.Args[0] { 26191 break 26192 } 26193 if idx != x0.Args[1] { 26194 break 26195 } 26196 if mem != x0.Args[2] { 26197 break 26198 } 26199 y := or.Args[1] 26200 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26201 break 26202 } 26203 b = mergePoint(b, x0, x1) 26204 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26205 v.reset(OpCopy) 26206 v.AddArg(v0) 26207 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26208 v1.AuxInt = j0 26209 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26210 v2.AuxInt = i0 26211 v2.Aux = s 26212 v2.AddArg(p) 26213 v2.AddArg(idx) 26214 v2.AddArg(mem) 26215 v1.AddArg(v2) 26216 v0.AddArg(v1) 26217 v0.AddArg(y) 26218 
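// As in the other @mergePoint rewrites, the replacement tree is built in the
// merge block b, and the original value v is reset to an OpCopy of the new
// ORQ rather than being rewritten in place; the copy is cleaned up by later
// passes.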
return true 26219 } 26220 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 26221 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26222 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26223 for { 26224 _ = v.Args[1] 26225 s1 := v.Args[0] 26226 if s1.Op != OpAMD64SHLQconst { 26227 break 26228 } 26229 j1 := s1.AuxInt 26230 x1 := s1.Args[0] 26231 if x1.Op != OpAMD64MOVWloadidx1 { 26232 break 26233 } 26234 i1 := x1.AuxInt 26235 s := x1.Aux 26236 _ = x1.Args[2] 26237 p := x1.Args[0] 26238 idx := x1.Args[1] 26239 mem := x1.Args[2] 26240 or := v.Args[1] 26241 if or.Op != OpAMD64ORQ { 26242 break 26243 } 26244 _ = or.Args[1] 26245 s0 := or.Args[0] 26246 if s0.Op != OpAMD64SHLQconst { 26247 break 26248 } 26249 j0 := s0.AuxInt 26250 x0 := s0.Args[0] 26251 if x0.Op != OpAMD64MOVWloadidx1 { 26252 break 26253 } 26254 i0 := x0.AuxInt 26255 if x0.Aux != s { 26256 break 26257 } 26258 _ = x0.Args[2] 26259 if idx != x0.Args[0] { 26260 break 26261 } 26262 if p != x0.Args[1] { 26263 break 26264 } 26265 if mem != x0.Args[2] { 26266 break 26267 } 26268 y := or.Args[1] 26269 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26270 break 26271 } 26272 b = mergePoint(b, x0, x1) 26273 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26274 v.reset(OpCopy) 26275 v.AddArg(v0) 26276 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26277 v1.AuxInt = j0 26278 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26279 v2.AuxInt = i0 26280 v2.Aux = s 26281 v2.AddArg(p) 26282 v2.AddArg(idx) 26283 v2.AddArg(mem) 26284 v1.AddArg(v2) 26285 v0.AddArg(v1) 26286 v0.AddArg(y) 26287 return true 26288 } 26289 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 26290 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26291 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26292 for { 26293 _ = v.Args[1] 26294 s1 := v.Args[0] 26295 if s1.Op != OpAMD64SHLQconst { 26296 break 26297 } 26298 j1 := s1.AuxInt 26299 x1 := s1.Args[0] 26300 if x1.Op != OpAMD64MOVWloadidx1 { 26301 break 26302 } 26303 i1 := x1.AuxInt 26304 s := x1.Aux 26305 _ = x1.Args[2] 26306 idx := x1.Args[0] 26307 p := x1.Args[1] 26308 mem := x1.Args[2] 26309 or := v.Args[1] 26310 if or.Op != OpAMD64ORQ { 26311 break 26312 } 26313 _ = or.Args[1] 26314 s0 := or.Args[0] 26315 if s0.Op != OpAMD64SHLQconst { 26316 break 26317 } 26318 j0 := s0.AuxInt 26319 x0 := s0.Args[0] 26320 if x0.Op != OpAMD64MOVWloadidx1 { 26321 break 26322 } 26323 i0 := x0.AuxInt 26324 if x0.Aux != s { 26325 break 26326 } 26327 _ = x0.Args[2] 26328 if idx != x0.Args[0] { 26329 break 26330 } 26331 if p != x0.Args[1] { 26332 break 26333 } 26334 if mem != x0.Args[2] { 26335 break 26336 } 26337 y := or.Args[1] 26338 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && 
x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26339 break 26340 } 26341 b = mergePoint(b, x0, x1) 26342 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26343 v.reset(OpCopy) 26344 v.AddArg(v0) 26345 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26346 v1.AuxInt = j0 26347 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26348 v2.AuxInt = i0 26349 v2.Aux = s 26350 v2.AddArg(p) 26351 v2.AddArg(idx) 26352 v2.AddArg(mem) 26353 v1.AddArg(v2) 26354 v0.AddArg(v1) 26355 v0.AddArg(y) 26356 return true 26357 } 26358 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 26359 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26360 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26361 for { 26362 _ = v.Args[1] 26363 s1 := v.Args[0] 26364 if s1.Op != OpAMD64SHLQconst { 26365 break 26366 } 26367 j1 := s1.AuxInt 26368 x1 := s1.Args[0] 26369 if x1.Op != OpAMD64MOVWloadidx1 { 26370 break 26371 } 26372 i1 := x1.AuxInt 26373 s := x1.Aux 26374 _ = x1.Args[2] 26375 p := x1.Args[0] 26376 idx := x1.Args[1] 26377 mem := x1.Args[2] 26378 or := v.Args[1] 26379 if or.Op != OpAMD64ORQ { 26380 break 26381 } 26382 _ = or.Args[1] 26383 y := or.Args[0] 26384 s0 := or.Args[1] 26385 if s0.Op != OpAMD64SHLQconst { 26386 break 26387 } 26388 j0 := s0.AuxInt 26389 x0 := s0.Args[0] 26390 if x0.Op != OpAMD64MOVWloadidx1 { 26391 break 26392 } 26393 i0 := x0.AuxInt 26394 if x0.Aux != s { 26395 break 26396 } 26397 _ = x0.Args[2] 26398 if p != x0.Args[0] { 26399 break 26400 } 26401 if idx != x0.Args[1] { 26402 break 26403 } 26404 if mem != x0.Args[2] { 26405 break 26406 } 26407 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26408 break 26409 } 26410 b = mergePoint(b, x0, x1) 26411 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26412 v.reset(OpCopy) 26413 v.AddArg(v0) 26414 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26415 v1.AuxInt = j0 26416 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26417 v2.AuxInt = i0 26418 v2.Aux = s 26419 v2.AddArg(p) 26420 v2.AddArg(idx) 26421 v2.AddArg(mem) 26422 v1.AddArg(v2) 26423 v0.AddArg(v1) 26424 v0.AddArg(y) 26425 return true 26426 } 26427 return false 26428 } 26429 func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool { 26430 b := v.Block 26431 _ = b 26432 typ := &b.Func.Config.Types 26433 _ = typ 26434 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 26435 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26436 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26437 for { 26438 _ = v.Args[1] 26439 s1 := v.Args[0] 26440 if s1.Op != OpAMD64SHLQconst { 26441 break 26442 } 26443 j1 := s1.AuxInt 
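// Each merge rule is emitted in several near-identical orderings: ORQ is
// commutative, and a 1-scaled indexed load accepts its pointer and index in
// either order, so the generator produces one matcher per combination of
// (p idx) vs. (idx p) and of ORQ operand order. The copies spread across
// rewriteValueAMD64_OpAMD64ORQ_80 and its neighboring functions differ only
// in which argument is bound to p, idx, or y.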
26444 x1 := s1.Args[0] 26445 if x1.Op != OpAMD64MOVWloadidx1 { 26446 break 26447 } 26448 i1 := x1.AuxInt 26449 s := x1.Aux 26450 _ = x1.Args[2] 26451 idx := x1.Args[0] 26452 p := x1.Args[1] 26453 mem := x1.Args[2] 26454 or := v.Args[1] 26455 if or.Op != OpAMD64ORQ { 26456 break 26457 } 26458 _ = or.Args[1] 26459 y := or.Args[0] 26460 s0 := or.Args[1] 26461 if s0.Op != OpAMD64SHLQconst { 26462 break 26463 } 26464 j0 := s0.AuxInt 26465 x0 := s0.Args[0] 26466 if x0.Op != OpAMD64MOVWloadidx1 { 26467 break 26468 } 26469 i0 := x0.AuxInt 26470 if x0.Aux != s { 26471 break 26472 } 26473 _ = x0.Args[2] 26474 if p != x0.Args[0] { 26475 break 26476 } 26477 if idx != x0.Args[1] { 26478 break 26479 } 26480 if mem != x0.Args[2] { 26481 break 26482 } 26483 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26484 break 26485 } 26486 b = mergePoint(b, x0, x1) 26487 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26488 v.reset(OpCopy) 26489 v.AddArg(v0) 26490 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26491 v1.AuxInt = j0 26492 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26493 v2.AuxInt = i0 26494 v2.Aux = s 26495 v2.AddArg(p) 26496 v2.AddArg(idx) 26497 v2.AddArg(mem) 26498 v1.AddArg(v2) 26499 v0.AddArg(v1) 26500 v0.AddArg(y) 26501 return true 26502 } 26503 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 26504 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26505 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26506 for { 26507 _ = v.Args[1] 26508 s1 := v.Args[0] 26509 if s1.Op != OpAMD64SHLQconst { 26510 break 26511 } 26512 j1 := s1.AuxInt 26513 x1 := s1.Args[0] 26514 if x1.Op != OpAMD64MOVWloadidx1 { 26515 break 26516 } 26517 i1 := x1.AuxInt 26518 s := x1.Aux 26519 _ = x1.Args[2] 26520 p := x1.Args[0] 26521 idx := x1.Args[1] 26522 mem := x1.Args[2] 26523 or := v.Args[1] 26524 if or.Op != OpAMD64ORQ { 26525 break 26526 } 26527 _ = or.Args[1] 26528 y := or.Args[0] 26529 s0 := or.Args[1] 26530 if s0.Op != OpAMD64SHLQconst { 26531 break 26532 } 26533 j0 := s0.AuxInt 26534 x0 := s0.Args[0] 26535 if x0.Op != OpAMD64MOVWloadidx1 { 26536 break 26537 } 26538 i0 := x0.AuxInt 26539 if x0.Aux != s { 26540 break 26541 } 26542 _ = x0.Args[2] 26543 if idx != x0.Args[0] { 26544 break 26545 } 26546 if p != x0.Args[1] { 26547 break 26548 } 26549 if mem != x0.Args[2] { 26550 break 26551 } 26552 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26553 break 26554 } 26555 b = mergePoint(b, x0, x1) 26556 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26557 v.reset(OpCopy) 26558 v.AddArg(v0) 26559 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26560 v1.AuxInt = j0 26561 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26562 v2.AuxInt = i0 26563 v2.Aux = s 26564 v2.AddArg(p) 26565 v2.AddArg(idx) 26566 v2.AddArg(mem) 26567 v1.AddArg(v2) 26568 v0.AddArg(v1) 26569 v0.AddArg(y) 26570 return true 26571 } 
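// In the cond lines above, the Uses == 1 checks ensure each intermediate
// value dies with the rewrite, mergePoint(b, x0, x1) picks a block (nil if
// none exists) where a value depending on both loads may be placed, and
// clobber invalidates the matched values so dead-code elimination can drop
// them. Roughly, in the rule notation used in these comments:
//
//	(ORQ (SHLQconst [j0]    (MOVWloadidx1 [i0]   ...))
//	     (SHLQconst [j0+16] (MOVWloadidx1 [i0+2] ...)))
//	  => (SHLQconst [j0] (MOVLloadidx1 [i0] ...))
//
// with the extra y operand of the inner ORQ carried over unchanged.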
26572 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 26573 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26574 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26575 for { 26576 _ = v.Args[1] 26577 s1 := v.Args[0] 26578 if s1.Op != OpAMD64SHLQconst { 26579 break 26580 } 26581 j1 := s1.AuxInt 26582 x1 := s1.Args[0] 26583 if x1.Op != OpAMD64MOVWloadidx1 { 26584 break 26585 } 26586 i1 := x1.AuxInt 26587 s := x1.Aux 26588 _ = x1.Args[2] 26589 idx := x1.Args[0] 26590 p := x1.Args[1] 26591 mem := x1.Args[2] 26592 or := v.Args[1] 26593 if or.Op != OpAMD64ORQ { 26594 break 26595 } 26596 _ = or.Args[1] 26597 y := or.Args[0] 26598 s0 := or.Args[1] 26599 if s0.Op != OpAMD64SHLQconst { 26600 break 26601 } 26602 j0 := s0.AuxInt 26603 x0 := s0.Args[0] 26604 if x0.Op != OpAMD64MOVWloadidx1 { 26605 break 26606 } 26607 i0 := x0.AuxInt 26608 if x0.Aux != s { 26609 break 26610 } 26611 _ = x0.Args[2] 26612 if idx != x0.Args[0] { 26613 break 26614 } 26615 if p != x0.Args[1] { 26616 break 26617 } 26618 if mem != x0.Args[2] { 26619 break 26620 } 26621 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26622 break 26623 } 26624 b = mergePoint(b, x0, x1) 26625 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26626 v.reset(OpCopy) 26627 v.AddArg(v0) 26628 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26629 v1.AuxInt = j0 26630 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26631 v2.AuxInt = i0 26632 v2.Aux = s 26633 v2.AddArg(p) 26634 v2.AddArg(idx) 26635 v2.AddArg(mem) 26636 v1.AddArg(v2) 26637 v0.AddArg(v1) 26638 v0.AddArg(y) 26639 return true 26640 } 26641 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26642 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26643 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26644 for { 26645 _ = v.Args[1] 26646 or := v.Args[0] 26647 if or.Op != OpAMD64ORQ { 26648 break 26649 } 26650 _ = or.Args[1] 26651 s0 := or.Args[0] 26652 if s0.Op != OpAMD64SHLQconst { 26653 break 26654 } 26655 j0 := s0.AuxInt 26656 x0 := s0.Args[0] 26657 if x0.Op != OpAMD64MOVWloadidx1 { 26658 break 26659 } 26660 i0 := x0.AuxInt 26661 s := x0.Aux 26662 _ = x0.Args[2] 26663 p := x0.Args[0] 26664 idx := x0.Args[1] 26665 mem := x0.Args[2] 26666 y := or.Args[1] 26667 s1 := v.Args[1] 26668 if s1.Op != OpAMD64SHLQconst { 26669 break 26670 } 26671 j1 := s1.AuxInt 26672 x1 := s1.Args[0] 26673 if x1.Op != OpAMD64MOVWloadidx1 { 26674 break 26675 } 26676 i1 := x1.AuxInt 26677 if x1.Aux != s { 26678 break 26679 } 26680 _ = x1.Args[2] 26681 if p != x1.Args[0] { 26682 break 26683 } 26684 if idx != x1.Args[1] { 26685 break 26686 } 26687 if mem != x1.Args[2] { 26688 break 26689 } 26690 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses 
== 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26691 break 26692 } 26693 b = mergePoint(b, x0, x1) 26694 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26695 v.reset(OpCopy) 26696 v.AddArg(v0) 26697 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26698 v1.AuxInt = j0 26699 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26700 v2.AuxInt = i0 26701 v2.Aux = s 26702 v2.AddArg(p) 26703 v2.AddArg(idx) 26704 v2.AddArg(mem) 26705 v1.AddArg(v2) 26706 v0.AddArg(v1) 26707 v0.AddArg(y) 26708 return true 26709 } 26710 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26711 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26712 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26713 for { 26714 _ = v.Args[1] 26715 or := v.Args[0] 26716 if or.Op != OpAMD64ORQ { 26717 break 26718 } 26719 _ = or.Args[1] 26720 s0 := or.Args[0] 26721 if s0.Op != OpAMD64SHLQconst { 26722 break 26723 } 26724 j0 := s0.AuxInt 26725 x0 := s0.Args[0] 26726 if x0.Op != OpAMD64MOVWloadidx1 { 26727 break 26728 } 26729 i0 := x0.AuxInt 26730 s := x0.Aux 26731 _ = x0.Args[2] 26732 idx := x0.Args[0] 26733 p := x0.Args[1] 26734 mem := x0.Args[2] 26735 y := or.Args[1] 26736 s1 := v.Args[1] 26737 if s1.Op != OpAMD64SHLQconst { 26738 break 26739 } 26740 j1 := s1.AuxInt 26741 x1 := s1.Args[0] 26742 if x1.Op != OpAMD64MOVWloadidx1 { 26743 break 26744 } 26745 i1 := x1.AuxInt 26746 if x1.Aux != s { 26747 break 26748 } 26749 _ = x1.Args[2] 26750 if p != x1.Args[0] { 26751 break 26752 } 26753 if idx != x1.Args[1] { 26754 break 26755 } 26756 if mem != x1.Args[2] { 26757 break 26758 } 26759 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26760 break 26761 } 26762 b = mergePoint(b, x0, x1) 26763 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26764 v.reset(OpCopy) 26765 v.AddArg(v0) 26766 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26767 v1.AuxInt = j0 26768 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26769 v2.AuxInt = i0 26770 v2.Aux = s 26771 v2.AddArg(p) 26772 v2.AddArg(idx) 26773 v2.AddArg(mem) 26774 v1.AddArg(v2) 26775 v0.AddArg(v1) 26776 v0.AddArg(y) 26777 return true 26778 } 26779 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26780 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26781 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26782 for { 26783 _ = v.Args[1] 26784 or := v.Args[0] 26785 if or.Op != OpAMD64ORQ { 26786 break 26787 } 26788 _ = or.Args[1] 26789 y := or.Args[0] 26790 s0 := or.Args[1] 26791 if s0.Op != OpAMD64SHLQconst { 26792 break 26793 } 26794 j0 := s0.AuxInt 26795 x0 := s0.Args[0] 26796 if x0.Op != OpAMD64MOVWloadidx1 { 26797 
break 26798 } 26799 i0 := x0.AuxInt 26800 s := x0.Aux 26801 _ = x0.Args[2] 26802 p := x0.Args[0] 26803 idx := x0.Args[1] 26804 mem := x0.Args[2] 26805 s1 := v.Args[1] 26806 if s1.Op != OpAMD64SHLQconst { 26807 break 26808 } 26809 j1 := s1.AuxInt 26810 x1 := s1.Args[0] 26811 if x1.Op != OpAMD64MOVWloadidx1 { 26812 break 26813 } 26814 i1 := x1.AuxInt 26815 if x1.Aux != s { 26816 break 26817 } 26818 _ = x1.Args[2] 26819 if p != x1.Args[0] { 26820 break 26821 } 26822 if idx != x1.Args[1] { 26823 break 26824 } 26825 if mem != x1.Args[2] { 26826 break 26827 } 26828 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26829 break 26830 } 26831 b = mergePoint(b, x0, x1) 26832 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26833 v.reset(OpCopy) 26834 v.AddArg(v0) 26835 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26836 v1.AuxInt = j0 26837 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26838 v2.AuxInt = i0 26839 v2.Aux = s 26840 v2.AddArg(p) 26841 v2.AddArg(idx) 26842 v2.AddArg(mem) 26843 v1.AddArg(v2) 26844 v0.AddArg(v1) 26845 v0.AddArg(y) 26846 return true 26847 } 26848 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 26849 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26850 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26851 for { 26852 _ = v.Args[1] 26853 or := v.Args[0] 26854 if or.Op != OpAMD64ORQ { 26855 break 26856 } 26857 _ = or.Args[1] 26858 y := or.Args[0] 26859 s0 := or.Args[1] 26860 if s0.Op != OpAMD64SHLQconst { 26861 break 26862 } 26863 j0 := s0.AuxInt 26864 x0 := s0.Args[0] 26865 if x0.Op != OpAMD64MOVWloadidx1 { 26866 break 26867 } 26868 i0 := x0.AuxInt 26869 s := x0.Aux 26870 _ = x0.Args[2] 26871 idx := x0.Args[0] 26872 p := x0.Args[1] 26873 mem := x0.Args[2] 26874 s1 := v.Args[1] 26875 if s1.Op != OpAMD64SHLQconst { 26876 break 26877 } 26878 j1 := s1.AuxInt 26879 x1 := s1.Args[0] 26880 if x1.Op != OpAMD64MOVWloadidx1 { 26881 break 26882 } 26883 i1 := x1.AuxInt 26884 if x1.Aux != s { 26885 break 26886 } 26887 _ = x1.Args[2] 26888 if p != x1.Args[0] { 26889 break 26890 } 26891 if idx != x1.Args[1] { 26892 break 26893 } 26894 if mem != x1.Args[2] { 26895 break 26896 } 26897 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26898 break 26899 } 26900 b = mergePoint(b, x0, x1) 26901 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26902 v.reset(OpCopy) 26903 v.AddArg(v0) 26904 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26905 v1.AuxInt = j0 26906 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26907 v2.AuxInt = i0 26908 v2.Aux = s 26909 v2.AddArg(p) 26910 v2.AddArg(idx) 26911 v2.AddArg(mem) 26912 v1.AddArg(v2) 26913 v0.AddArg(v1) 26914 v0.AddArg(y) 26915 return true 26916 } 26917 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 26918 // cond: i1 == i0+2 && j1 == j0+16 
&& j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26919 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26920 for { 26921 _ = v.Args[1] 26922 or := v.Args[0] 26923 if or.Op != OpAMD64ORQ { 26924 break 26925 } 26926 _ = or.Args[1] 26927 s0 := or.Args[0] 26928 if s0.Op != OpAMD64SHLQconst { 26929 break 26930 } 26931 j0 := s0.AuxInt 26932 x0 := s0.Args[0] 26933 if x0.Op != OpAMD64MOVWloadidx1 { 26934 break 26935 } 26936 i0 := x0.AuxInt 26937 s := x0.Aux 26938 _ = x0.Args[2] 26939 p := x0.Args[0] 26940 idx := x0.Args[1] 26941 mem := x0.Args[2] 26942 y := or.Args[1] 26943 s1 := v.Args[1] 26944 if s1.Op != OpAMD64SHLQconst { 26945 break 26946 } 26947 j1 := s1.AuxInt 26948 x1 := s1.Args[0] 26949 if x1.Op != OpAMD64MOVWloadidx1 { 26950 break 26951 } 26952 i1 := x1.AuxInt 26953 if x1.Aux != s { 26954 break 26955 } 26956 _ = x1.Args[2] 26957 if idx != x1.Args[0] { 26958 break 26959 } 26960 if p != x1.Args[1] { 26961 break 26962 } 26963 if mem != x1.Args[2] { 26964 break 26965 } 26966 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26967 break 26968 } 26969 b = mergePoint(b, x0, x1) 26970 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26971 v.reset(OpCopy) 26972 v.AddArg(v0) 26973 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26974 v1.AuxInt = j0 26975 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26976 v2.AuxInt = i0 26977 v2.Aux = s 26978 v2.AddArg(p) 26979 v2.AddArg(idx) 26980 v2.AddArg(mem) 26981 v1.AddArg(v2) 26982 v0.AddArg(v1) 26983 v0.AddArg(y) 26984 return true 26985 } 26986 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 26987 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26988 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 26989 for { 26990 _ = v.Args[1] 26991 or := v.Args[0] 26992 if or.Op != OpAMD64ORQ { 26993 break 26994 } 26995 _ = or.Args[1] 26996 s0 := or.Args[0] 26997 if s0.Op != OpAMD64SHLQconst { 26998 break 26999 } 27000 j0 := s0.AuxInt 27001 x0 := s0.Args[0] 27002 if x0.Op != OpAMD64MOVWloadidx1 { 27003 break 27004 } 27005 i0 := x0.AuxInt 27006 s := x0.Aux 27007 _ = x0.Args[2] 27008 idx := x0.Args[0] 27009 p := x0.Args[1] 27010 mem := x0.Args[2] 27011 y := or.Args[1] 27012 s1 := v.Args[1] 27013 if s1.Op != OpAMD64SHLQconst { 27014 break 27015 } 27016 j1 := s1.AuxInt 27017 x1 := s1.Args[0] 27018 if x1.Op != OpAMD64MOVWloadidx1 { 27019 break 27020 } 27021 i1 := x1.AuxInt 27022 if x1.Aux != s { 27023 break 27024 } 27025 _ = x1.Args[2] 27026 if idx != x1.Args[0] { 27027 break 27028 } 27029 if p != x1.Args[1] { 27030 break 27031 } 27032 if mem != x1.Args[2] { 27033 break 27034 } 27035 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27036 break 27037 } 
27038 b = mergePoint(b, x0, x1) 27039 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27040 v.reset(OpCopy) 27041 v.AddArg(v0) 27042 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27043 v1.AuxInt = j0 27044 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27045 v2.AuxInt = i0 27046 v2.Aux = s 27047 v2.AddArg(p) 27048 v2.AddArg(idx) 27049 v2.AddArg(mem) 27050 v1.AddArg(v2) 27051 v0.AddArg(v1) 27052 v0.AddArg(y) 27053 return true 27054 } 27055 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27056 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27057 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27058 for { 27059 _ = v.Args[1] 27060 or := v.Args[0] 27061 if or.Op != OpAMD64ORQ { 27062 break 27063 } 27064 _ = or.Args[1] 27065 y := or.Args[0] 27066 s0 := or.Args[1] 27067 if s0.Op != OpAMD64SHLQconst { 27068 break 27069 } 27070 j0 := s0.AuxInt 27071 x0 := s0.Args[0] 27072 if x0.Op != OpAMD64MOVWloadidx1 { 27073 break 27074 } 27075 i0 := x0.AuxInt 27076 s := x0.Aux 27077 _ = x0.Args[2] 27078 p := x0.Args[0] 27079 idx := x0.Args[1] 27080 mem := x0.Args[2] 27081 s1 := v.Args[1] 27082 if s1.Op != OpAMD64SHLQconst { 27083 break 27084 } 27085 j1 := s1.AuxInt 27086 x1 := s1.Args[0] 27087 if x1.Op != OpAMD64MOVWloadidx1 { 27088 break 27089 } 27090 i1 := x1.AuxInt 27091 if x1.Aux != s { 27092 break 27093 } 27094 _ = x1.Args[2] 27095 if idx != x1.Args[0] { 27096 break 27097 } 27098 if p != x1.Args[1] { 27099 break 27100 } 27101 if mem != x1.Args[2] { 27102 break 27103 } 27104 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27105 break 27106 } 27107 b = mergePoint(b, x0, x1) 27108 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27109 v.reset(OpCopy) 27110 v.AddArg(v0) 27111 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27112 v1.AuxInt = j0 27113 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27114 v2.AuxInt = i0 27115 v2.Aux = s 27116 v2.AddArg(p) 27117 v2.AddArg(idx) 27118 v2.AddArg(mem) 27119 v1.AddArg(v2) 27120 v0.AddArg(v1) 27121 v0.AddArg(y) 27122 return true 27123 } 27124 return false 27125 } 27126 func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool { 27127 b := v.Block 27128 _ = b 27129 typ := &b.Func.Config.Types 27130 _ = typ 27131 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 27132 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27133 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27134 for { 27135 _ = v.Args[1] 27136 or := v.Args[0] 27137 if or.Op != OpAMD64ORQ { 27138 break 27139 } 27140 _ = or.Args[1] 27141 y := or.Args[0] 27142 s0 := or.Args[1] 27143 if s0.Op != OpAMD64SHLQconst { 27144 break 27145 } 27146 j0 := s0.AuxInt 27147 x0 := s0.Args[0] 27148 if x0.Op != OpAMD64MOVWloadidx1 { 27149 break 27150 } 27151 
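// This is the last of the little-endian MOVWloadidx1 orderings; the
// remaining ORQ rules in this function switch to big-endian order, where a
// load from the higher address lands in the lower bits. Those patterns are
// collapsed into a single wide load plus a byte swap (ROLWconst [8], BSWAPL,
// or BSWAPQ). A hedged Go sketch of source that can produce such a pattern
// -- the helper name is illustrative only:
//
//	func readBE32(b []byte, i int) uint32 {
//		return uint32(b[i])<<24 | uint32(b[i+1])<<16 |
//			uint32(b[i+2])<<8 | uint32(b[i+3])
//	}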
i0 := x0.AuxInt 27152 s := x0.Aux 27153 _ = x0.Args[2] 27154 idx := x0.Args[0] 27155 p := x0.Args[1] 27156 mem := x0.Args[2] 27157 s1 := v.Args[1] 27158 if s1.Op != OpAMD64SHLQconst { 27159 break 27160 } 27161 j1 := s1.AuxInt 27162 x1 := s1.Args[0] 27163 if x1.Op != OpAMD64MOVWloadidx1 { 27164 break 27165 } 27166 i1 := x1.AuxInt 27167 if x1.Aux != s { 27168 break 27169 } 27170 _ = x1.Args[2] 27171 if idx != x1.Args[0] { 27172 break 27173 } 27174 if p != x1.Args[1] { 27175 break 27176 } 27177 if mem != x1.Args[2] { 27178 break 27179 } 27180 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27181 break 27182 } 27183 b = mergePoint(b, x0, x1) 27184 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27185 v.reset(OpCopy) 27186 v.AddArg(v0) 27187 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27188 v1.AuxInt = j0 27189 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 27190 v2.AuxInt = i0 27191 v2.Aux = s 27192 v2.AddArg(p) 27193 v2.AddArg(idx) 27194 v2.AddArg(mem) 27195 v1.AddArg(v2) 27196 v0.AddArg(v1) 27197 v0.AddArg(y) 27198 return true 27199 } 27200 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 27201 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27202 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 27203 for { 27204 _ = v.Args[1] 27205 x1 := v.Args[0] 27206 if x1.Op != OpAMD64MOVBload { 27207 break 27208 } 27209 i1 := x1.AuxInt 27210 s := x1.Aux 27211 _ = x1.Args[1] 27212 p := x1.Args[0] 27213 mem := x1.Args[1] 27214 sh := v.Args[1] 27215 if sh.Op != OpAMD64SHLQconst { 27216 break 27217 } 27218 if sh.AuxInt != 8 { 27219 break 27220 } 27221 x0 := sh.Args[0] 27222 if x0.Op != OpAMD64MOVBload { 27223 break 27224 } 27225 i0 := x0.AuxInt 27226 if x0.Aux != s { 27227 break 27228 } 27229 _ = x0.Args[1] 27230 if p != x0.Args[0] { 27231 break 27232 } 27233 if mem != x0.Args[1] { 27234 break 27235 } 27236 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27237 break 27238 } 27239 b = mergePoint(b, x0, x1) 27240 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27241 v.reset(OpCopy) 27242 v.AddArg(v0) 27243 v0.AuxInt = 8 27244 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27245 v1.AuxInt = i0 27246 v1.Aux = s 27247 v1.AddArg(p) 27248 v1.AddArg(mem) 27249 v0.AddArg(v1) 27250 return true 27251 } 27252 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 27253 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 27254 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 27255 for { 27256 _ = v.Args[1] 27257 sh := v.Args[0] 27258 if sh.Op != OpAMD64SHLQconst { 27259 break 27260 } 27261 if sh.AuxInt != 8 { 27262 break 27263 } 27264 x0 := sh.Args[0] 27265 if x0.Op != OpAMD64MOVBload { 27266 break 27267 } 27268 i0 := x0.AuxInt 27269 s := x0.Aux 27270 _ = x0.Args[1] 27271 p := x0.Args[0] 27272 mem := x0.Args[1] 27273 x1 := v.Args[1] 27274 if x1.Op != OpAMD64MOVBload { 27275 break 27276 } 27277 i1 := x1.AuxInt 27278 if x1.Aux != s { 27279 break 27280 } 27281 _ = x1.Args[1] 27282 if p != 
x1.Args[0] { 27283 break 27284 } 27285 if mem != x1.Args[1] { 27286 break 27287 } 27288 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 27289 break 27290 } 27291 b = mergePoint(b, x0, x1) 27292 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 27293 v.reset(OpCopy) 27294 v.AddArg(v0) 27295 v0.AuxInt = 8 27296 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27297 v1.AuxInt = i0 27298 v1.Aux = s 27299 v1.AddArg(p) 27300 v1.AddArg(mem) 27301 v0.AddArg(v1) 27302 return true 27303 } 27304 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 27305 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27306 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 27307 for { 27308 _ = v.Args[1] 27309 r1 := v.Args[0] 27310 if r1.Op != OpAMD64ROLWconst { 27311 break 27312 } 27313 if r1.AuxInt != 8 { 27314 break 27315 } 27316 x1 := r1.Args[0] 27317 if x1.Op != OpAMD64MOVWload { 27318 break 27319 } 27320 i1 := x1.AuxInt 27321 s := x1.Aux 27322 _ = x1.Args[1] 27323 p := x1.Args[0] 27324 mem := x1.Args[1] 27325 sh := v.Args[1] 27326 if sh.Op != OpAMD64SHLQconst { 27327 break 27328 } 27329 if sh.AuxInt != 16 { 27330 break 27331 } 27332 r0 := sh.Args[0] 27333 if r0.Op != OpAMD64ROLWconst { 27334 break 27335 } 27336 if r0.AuxInt != 8 { 27337 break 27338 } 27339 x0 := r0.Args[0] 27340 if x0.Op != OpAMD64MOVWload { 27341 break 27342 } 27343 i0 := x0.AuxInt 27344 if x0.Aux != s { 27345 break 27346 } 27347 _ = x0.Args[1] 27348 if p != x0.Args[0] { 27349 break 27350 } 27351 if mem != x0.Args[1] { 27352 break 27353 } 27354 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27355 break 27356 } 27357 b = mergePoint(b, x0, x1) 27358 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27359 v.reset(OpCopy) 27360 v.AddArg(v0) 27361 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27362 v1.AuxInt = i0 27363 v1.Aux = s 27364 v1.AddArg(p) 27365 v1.AddArg(mem) 27366 v0.AddArg(v1) 27367 return true 27368 } 27369 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 27370 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27371 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 27372 for { 27373 _ = v.Args[1] 27374 sh := v.Args[0] 27375 if sh.Op != OpAMD64SHLQconst { 27376 break 27377 } 27378 if sh.AuxInt != 16 { 27379 break 27380 } 27381 r0 := sh.Args[0] 27382 if r0.Op != OpAMD64ROLWconst { 27383 break 27384 } 27385 if r0.AuxInt != 8 { 27386 break 27387 } 27388 x0 := r0.Args[0] 27389 if x0.Op != OpAMD64MOVWload { 27390 break 27391 } 27392 i0 := x0.AuxInt 27393 s := x0.Aux 27394 _ = x0.Args[1] 27395 p := x0.Args[0] 27396 mem := x0.Args[1] 27397 r1 := v.Args[1] 27398 if r1.Op != OpAMD64ROLWconst { 27399 break 27400 } 27401 if r1.AuxInt != 8 { 27402 break 27403 } 27404 x1 := r1.Args[0] 27405 if x1.Op != OpAMD64MOVWload { 27406 break 27407 } 27408 i1 := x1.AuxInt 27409 if 
x1.Aux != s { 27410 break 27411 } 27412 _ = x1.Args[1] 27413 if p != x1.Args[0] { 27414 break 27415 } 27416 if mem != x1.Args[1] { 27417 break 27418 } 27419 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27420 break 27421 } 27422 b = mergePoint(b, x0, x1) 27423 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 27424 v.reset(OpCopy) 27425 v.AddArg(v0) 27426 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27427 v1.AuxInt = i0 27428 v1.Aux = s 27429 v1.AddArg(p) 27430 v1.AddArg(mem) 27431 v0.AddArg(v1) 27432 return true 27433 } 27434 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) 27435 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27436 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 27437 for { 27438 _ = v.Args[1] 27439 r1 := v.Args[0] 27440 if r1.Op != OpAMD64BSWAPL { 27441 break 27442 } 27443 x1 := r1.Args[0] 27444 if x1.Op != OpAMD64MOVLload { 27445 break 27446 } 27447 i1 := x1.AuxInt 27448 s := x1.Aux 27449 _ = x1.Args[1] 27450 p := x1.Args[0] 27451 mem := x1.Args[1] 27452 sh := v.Args[1] 27453 if sh.Op != OpAMD64SHLQconst { 27454 break 27455 } 27456 if sh.AuxInt != 32 { 27457 break 27458 } 27459 r0 := sh.Args[0] 27460 if r0.Op != OpAMD64BSWAPL { 27461 break 27462 } 27463 x0 := r0.Args[0] 27464 if x0.Op != OpAMD64MOVLload { 27465 break 27466 } 27467 i0 := x0.AuxInt 27468 if x0.Aux != s { 27469 break 27470 } 27471 _ = x0.Args[1] 27472 if p != x0.Args[0] { 27473 break 27474 } 27475 if mem != x0.Args[1] { 27476 break 27477 } 27478 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27479 break 27480 } 27481 b = mergePoint(b, x0, x1) 27482 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 27483 v.reset(OpCopy) 27484 v.AddArg(v0) 27485 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 27486 v1.AuxInt = i0 27487 v1.Aux = s 27488 v1.AddArg(p) 27489 v1.AddArg(mem) 27490 v0.AddArg(v1) 27491 return true 27492 } 27493 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) 27494 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 27495 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 27496 for { 27497 _ = v.Args[1] 27498 sh := v.Args[0] 27499 if sh.Op != OpAMD64SHLQconst { 27500 break 27501 } 27502 if sh.AuxInt != 32 { 27503 break 27504 } 27505 r0 := sh.Args[0] 27506 if r0.Op != OpAMD64BSWAPL { 27507 break 27508 } 27509 x0 := r0.Args[0] 27510 if x0.Op != OpAMD64MOVLload { 27511 break 27512 } 27513 i0 := x0.AuxInt 27514 s := x0.Aux 27515 _ = x0.Args[1] 27516 p := x0.Args[0] 27517 mem := x0.Args[1] 27518 r1 := v.Args[1] 27519 if r1.Op != OpAMD64BSWAPL { 27520 break 27521 } 27522 x1 := r1.Args[0] 27523 if x1.Op != OpAMD64MOVLload { 27524 break 27525 } 27526 i1 := x1.AuxInt 27527 if x1.Aux != s { 27528 break 27529 } 27530 _ = x1.Args[1] 27531 if p != x1.Args[0] { 27532 break 27533 } 27534 if mem != 
x1.Args[1] { 27535 break 27536 } 27537 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 27538 break 27539 } 27540 b = mergePoint(b, x0, x1) 27541 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 27542 v.reset(OpCopy) 27543 v.AddArg(v0) 27544 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 27545 v1.AuxInt = i0 27546 v1.Aux = s 27547 v1.AddArg(p) 27548 v1.AddArg(mem) 27549 v0.AddArg(v1) 27550 return true 27551 } 27552 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 27553 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27554 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27555 for { 27556 _ = v.Args[1] 27557 s0 := v.Args[0] 27558 if s0.Op != OpAMD64SHLQconst { 27559 break 27560 } 27561 j0 := s0.AuxInt 27562 x0 := s0.Args[0] 27563 if x0.Op != OpAMD64MOVBload { 27564 break 27565 } 27566 i0 := x0.AuxInt 27567 s := x0.Aux 27568 _ = x0.Args[1] 27569 p := x0.Args[0] 27570 mem := x0.Args[1] 27571 or := v.Args[1] 27572 if or.Op != OpAMD64ORQ { 27573 break 27574 } 27575 _ = or.Args[1] 27576 s1 := or.Args[0] 27577 if s1.Op != OpAMD64SHLQconst { 27578 break 27579 } 27580 j1 := s1.AuxInt 27581 x1 := s1.Args[0] 27582 if x1.Op != OpAMD64MOVBload { 27583 break 27584 } 27585 i1 := x1.AuxInt 27586 if x1.Aux != s { 27587 break 27588 } 27589 _ = x1.Args[1] 27590 if p != x1.Args[0] { 27591 break 27592 } 27593 if mem != x1.Args[1] { 27594 break 27595 } 27596 y := or.Args[1] 27597 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27598 break 27599 } 27600 b = mergePoint(b, x0, x1) 27601 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27602 v.reset(OpCopy) 27603 v.AddArg(v0) 27604 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27605 v1.AuxInt = j1 27606 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27607 v2.AuxInt = 8 27608 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27609 v3.AuxInt = i0 27610 v3.Aux = s 27611 v3.AddArg(p) 27612 v3.AddArg(mem) 27613 v2.AddArg(v3) 27614 v1.AddArg(v2) 27615 v0.AddArg(v1) 27616 v0.AddArg(y) 27617 return true 27618 } 27619 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) 27620 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27621 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27622 for { 27623 _ = v.Args[1] 27624 s0 := v.Args[0] 27625 if s0.Op != OpAMD64SHLQconst { 27626 break 27627 } 27628 j0 := s0.AuxInt 27629 x0 := s0.Args[0] 27630 if x0.Op != OpAMD64MOVBload { 27631 break 27632 } 27633 i0 := x0.AuxInt 27634 s := x0.Aux 27635 _ = x0.Args[1] 27636 p := x0.Args[0] 27637 mem := x0.Args[1] 27638 or := v.Args[1] 27639 if or.Op != OpAMD64ORQ { 27640 
break 27641 } 27642 _ = or.Args[1] 27643 y := or.Args[0] 27644 s1 := or.Args[1] 27645 if s1.Op != OpAMD64SHLQconst { 27646 break 27647 } 27648 j1 := s1.AuxInt 27649 x1 := s1.Args[0] 27650 if x1.Op != OpAMD64MOVBload { 27651 break 27652 } 27653 i1 := x1.AuxInt 27654 if x1.Aux != s { 27655 break 27656 } 27657 _ = x1.Args[1] 27658 if p != x1.Args[0] { 27659 break 27660 } 27661 if mem != x1.Args[1] { 27662 break 27663 } 27664 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27665 break 27666 } 27667 b = mergePoint(b, x0, x1) 27668 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27669 v.reset(OpCopy) 27670 v.AddArg(v0) 27671 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27672 v1.AuxInt = j1 27673 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27674 v2.AuxInt = 8 27675 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27676 v3.AuxInt = i0 27677 v3.Aux = s 27678 v3.AddArg(p) 27679 v3.AddArg(mem) 27680 v2.AddArg(v3) 27681 v1.AddArg(v2) 27682 v0.AddArg(v1) 27683 v0.AddArg(y) 27684 return true 27685 } 27686 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 27687 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27688 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27689 for { 27690 _ = v.Args[1] 27691 or := v.Args[0] 27692 if or.Op != OpAMD64ORQ { 27693 break 27694 } 27695 _ = or.Args[1] 27696 s1 := or.Args[0] 27697 if s1.Op != OpAMD64SHLQconst { 27698 break 27699 } 27700 j1 := s1.AuxInt 27701 x1 := s1.Args[0] 27702 if x1.Op != OpAMD64MOVBload { 27703 break 27704 } 27705 i1 := x1.AuxInt 27706 s := x1.Aux 27707 _ = x1.Args[1] 27708 p := x1.Args[0] 27709 mem := x1.Args[1] 27710 y := or.Args[1] 27711 s0 := v.Args[1] 27712 if s0.Op != OpAMD64SHLQconst { 27713 break 27714 } 27715 j0 := s0.AuxInt 27716 x0 := s0.Args[0] 27717 if x0.Op != OpAMD64MOVBload { 27718 break 27719 } 27720 i0 := x0.AuxInt 27721 if x0.Aux != s { 27722 break 27723 } 27724 _ = x0.Args[1] 27725 if p != x0.Args[0] { 27726 break 27727 } 27728 if mem != x0.Args[1] { 27729 break 27730 } 27731 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27732 break 27733 } 27734 b = mergePoint(b, x0, x1) 27735 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27736 v.reset(OpCopy) 27737 v.AddArg(v0) 27738 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27739 v1.AuxInt = j1 27740 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27741 v2.AuxInt = 8 27742 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27743 v3.AuxInt = i0 27744 v3.Aux = s 27745 v3.AddArg(p) 27746 v3.AddArg(mem) 27747 v2.AddArg(v3) 27748 v1.AddArg(v2) 27749 v0.AddArg(v1) 27750 v0.AddArg(y) 27751 return true 27752 } 27753 return false 27754 } 27755 func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool { 27756 b := v.Block 27757 _ = b 27758 typ := &b.Func.Config.Types 27759 _ = typ 27760 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 
s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 27761 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27762 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 27763 for { 27764 _ = v.Args[1] 27765 or := v.Args[0] 27766 if or.Op != OpAMD64ORQ { 27767 break 27768 } 27769 _ = or.Args[1] 27770 y := or.Args[0] 27771 s1 := or.Args[1] 27772 if s1.Op != OpAMD64SHLQconst { 27773 break 27774 } 27775 j1 := s1.AuxInt 27776 x1 := s1.Args[0] 27777 if x1.Op != OpAMD64MOVBload { 27778 break 27779 } 27780 i1 := x1.AuxInt 27781 s := x1.Aux 27782 _ = x1.Args[1] 27783 p := x1.Args[0] 27784 mem := x1.Args[1] 27785 s0 := v.Args[1] 27786 if s0.Op != OpAMD64SHLQconst { 27787 break 27788 } 27789 j0 := s0.AuxInt 27790 x0 := s0.Args[0] 27791 if x0.Op != OpAMD64MOVBload { 27792 break 27793 } 27794 i0 := x0.AuxInt 27795 if x0.Aux != s { 27796 break 27797 } 27798 _ = x0.Args[1] 27799 if p != x0.Args[0] { 27800 break 27801 } 27802 if mem != x0.Args[1] { 27803 break 27804 } 27805 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27806 break 27807 } 27808 b = mergePoint(b, x0, x1) 27809 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27810 v.reset(OpCopy) 27811 v.AddArg(v0) 27812 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27813 v1.AuxInt = j1 27814 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 27815 v2.AuxInt = 8 27816 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 27817 v3.AuxInt = i0 27818 v3.Aux = s 27819 v3.AddArg(p) 27820 v3.AddArg(mem) 27821 v2.AddArg(v3) 27822 v1.AddArg(v2) 27823 v0.AddArg(v1) 27824 v0.AddArg(y) 27825 return true 27826 } 27827 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 27828 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 27829 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 27830 for { 27831 _ = v.Args[1] 27832 s0 := v.Args[0] 27833 if s0.Op != OpAMD64SHLQconst { 27834 break 27835 } 27836 j0 := s0.AuxInt 27837 r0 := s0.Args[0] 27838 if r0.Op != OpAMD64ROLWconst { 27839 break 27840 } 27841 if r0.AuxInt != 8 { 27842 break 27843 } 27844 x0 := r0.Args[0] 27845 if x0.Op != OpAMD64MOVWload { 27846 break 27847 } 27848 i0 := x0.AuxInt 27849 s := x0.Aux 27850 _ = x0.Args[1] 27851 p := x0.Args[0] 27852 mem := x0.Args[1] 27853 or := v.Args[1] 27854 if or.Op != OpAMD64ORQ { 27855 break 27856 } 27857 _ = or.Args[1] 27858 s1 := or.Args[0] 27859 if s1.Op != OpAMD64SHLQconst { 27860 break 27861 } 27862 j1 := s1.AuxInt 27863 r1 := s1.Args[0] 27864 if r1.Op != OpAMD64ROLWconst { 27865 break 27866 } 27867 if r1.AuxInt != 8 { 27868 break 27869 } 27870 x1 := r1.Args[0] 27871 if x1.Op != OpAMD64MOVWload { 27872 break 27873 } 27874 i1 := x1.AuxInt 27875 if x1.Aux != s { 27876 break 27877 } 27878 _ = x1.Args[1] 27879 if 
p != x1.Args[0] { 27880 break 27881 } 27882 if mem != x1.Args[1] { 27883 break 27884 } 27885 y := or.Args[1] 27886 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 27887 break 27888 } 27889 b = mergePoint(b, x0, x1) 27890 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27891 v.reset(OpCopy) 27892 v.AddArg(v0) 27893 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27894 v1.AuxInt = j1 27895 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 27896 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27897 v3.AuxInt = i0 27898 v3.Aux = s 27899 v3.AddArg(p) 27900 v3.AddArg(mem) 27901 v2.AddArg(v3) 27902 v1.AddArg(v2) 27903 v0.AddArg(v1) 27904 v0.AddArg(y) 27905 return true 27906 } 27907 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) 27908 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 27909 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 27910 for { 27911 _ = v.Args[1] 27912 s0 := v.Args[0] 27913 if s0.Op != OpAMD64SHLQconst { 27914 break 27915 } 27916 j0 := s0.AuxInt 27917 r0 := s0.Args[0] 27918 if r0.Op != OpAMD64ROLWconst { 27919 break 27920 } 27921 if r0.AuxInt != 8 { 27922 break 27923 } 27924 x0 := r0.Args[0] 27925 if x0.Op != OpAMD64MOVWload { 27926 break 27927 } 27928 i0 := x0.AuxInt 27929 s := x0.Aux 27930 _ = x0.Args[1] 27931 p := x0.Args[0] 27932 mem := x0.Args[1] 27933 or := v.Args[1] 27934 if or.Op != OpAMD64ORQ { 27935 break 27936 } 27937 _ = or.Args[1] 27938 y := or.Args[0] 27939 s1 := or.Args[1] 27940 if s1.Op != OpAMD64SHLQconst { 27941 break 27942 } 27943 j1 := s1.AuxInt 27944 r1 := s1.Args[0] 27945 if r1.Op != OpAMD64ROLWconst { 27946 break 27947 } 27948 if r1.AuxInt != 8 { 27949 break 27950 } 27951 x1 := r1.Args[0] 27952 if x1.Op != OpAMD64MOVWload { 27953 break 27954 } 27955 i1 := x1.AuxInt 27956 if x1.Aux != s { 27957 break 27958 } 27959 _ = x1.Args[1] 27960 if p != x1.Args[0] { 27961 break 27962 } 27963 if mem != x1.Args[1] { 27964 break 27965 } 27966 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 27967 break 27968 } 27969 b = mergePoint(b, x0, x1) 27970 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27971 v.reset(OpCopy) 27972 v.AddArg(v0) 27973 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27974 v1.AuxInt = j1 27975 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 27976 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 27977 v3.AuxInt = i0 27978 v3.Aux = s 27979 v3.AddArg(p) 27980 v3.AddArg(mem) 27981 v2.AddArg(v3) 27982 v1.AddArg(v2) 27983 v0.AddArg(v1) 27984 v0.AddArg(y) 27985 return true 27986 } 27987 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] 
x0:(MOVWload [i0] {s} p mem)))) 27988 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 27989 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 27990 for { 27991 _ = v.Args[1] 27992 or := v.Args[0] 27993 if or.Op != OpAMD64ORQ { 27994 break 27995 } 27996 _ = or.Args[1] 27997 s1 := or.Args[0] 27998 if s1.Op != OpAMD64SHLQconst { 27999 break 28000 } 28001 j1 := s1.AuxInt 28002 r1 := s1.Args[0] 28003 if r1.Op != OpAMD64ROLWconst { 28004 break 28005 } 28006 if r1.AuxInt != 8 { 28007 break 28008 } 28009 x1 := r1.Args[0] 28010 if x1.Op != OpAMD64MOVWload { 28011 break 28012 } 28013 i1 := x1.AuxInt 28014 s := x1.Aux 28015 _ = x1.Args[1] 28016 p := x1.Args[0] 28017 mem := x1.Args[1] 28018 y := or.Args[1] 28019 s0 := v.Args[1] 28020 if s0.Op != OpAMD64SHLQconst { 28021 break 28022 } 28023 j0 := s0.AuxInt 28024 r0 := s0.Args[0] 28025 if r0.Op != OpAMD64ROLWconst { 28026 break 28027 } 28028 if r0.AuxInt != 8 { 28029 break 28030 } 28031 x0 := r0.Args[0] 28032 if x0.Op != OpAMD64MOVWload { 28033 break 28034 } 28035 i0 := x0.AuxInt 28036 if x0.Aux != s { 28037 break 28038 } 28039 _ = x0.Args[1] 28040 if p != x0.Args[0] { 28041 break 28042 } 28043 if mem != x0.Args[1] { 28044 break 28045 } 28046 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 28047 break 28048 } 28049 b = mergePoint(b, x0, x1) 28050 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28051 v.reset(OpCopy) 28052 v.AddArg(v0) 28053 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28054 v1.AuxInt = j1 28055 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 28056 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 28057 v3.AuxInt = i0 28058 v3.Aux = s 28059 v3.AddArg(p) 28060 v3.AddArg(mem) 28061 v2.AddArg(v3) 28062 v1.AddArg(v2) 28063 v0.AddArg(v1) 28064 v0.AddArg(y) 28065 return true 28066 } 28067 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 28068 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 28069 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 28070 for { 28071 _ = v.Args[1] 28072 or := v.Args[0] 28073 if or.Op != OpAMD64ORQ { 28074 break 28075 } 28076 _ = or.Args[1] 28077 y := or.Args[0] 28078 s1 := or.Args[1] 28079 if s1.Op != OpAMD64SHLQconst { 28080 break 28081 } 28082 j1 := s1.AuxInt 28083 r1 := s1.Args[0] 28084 if r1.Op != OpAMD64ROLWconst { 28085 break 28086 } 28087 if r1.AuxInt != 8 { 28088 break 28089 } 28090 x1 := r1.Args[0] 28091 if x1.Op != OpAMD64MOVWload { 28092 break 28093 } 28094 i1 := x1.AuxInt 28095 s := x1.Aux 28096 _ = x1.Args[1] 28097 p := x1.Args[0] 28098 mem := x1.Args[1] 28099 s0 := v.Args[1] 28100 if s0.Op != 
OpAMD64SHLQconst { 28101 break 28102 } 28103 j0 := s0.AuxInt 28104 r0 := s0.Args[0] 28105 if r0.Op != OpAMD64ROLWconst { 28106 break 28107 } 28108 if r0.AuxInt != 8 { 28109 break 28110 } 28111 x0 := r0.Args[0] 28112 if x0.Op != OpAMD64MOVWload { 28113 break 28114 } 28115 i0 := x0.AuxInt 28116 if x0.Aux != s { 28117 break 28118 } 28119 _ = x0.Args[1] 28120 if p != x0.Args[0] { 28121 break 28122 } 28123 if mem != x0.Args[1] { 28124 break 28125 } 28126 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 28127 break 28128 } 28129 b = mergePoint(b, x0, x1) 28130 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28131 v.reset(OpCopy) 28132 v.AddArg(v0) 28133 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28134 v1.AuxInt = j1 28135 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 28136 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 28137 v3.AuxInt = i0 28138 v3.Aux = s 28139 v3.AddArg(p) 28140 v3.AddArg(mem) 28141 v2.AddArg(v3) 28142 v1.AddArg(v2) 28143 v0.AddArg(v1) 28144 v0.AddArg(y) 28145 return true 28146 } 28147 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 28148 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28149 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28150 for { 28151 _ = v.Args[1] 28152 x1 := v.Args[0] 28153 if x1.Op != OpAMD64MOVBloadidx1 { 28154 break 28155 } 28156 i1 := x1.AuxInt 28157 s := x1.Aux 28158 _ = x1.Args[2] 28159 p := x1.Args[0] 28160 idx := x1.Args[1] 28161 mem := x1.Args[2] 28162 sh := v.Args[1] 28163 if sh.Op != OpAMD64SHLQconst { 28164 break 28165 } 28166 if sh.AuxInt != 8 { 28167 break 28168 } 28169 x0 := sh.Args[0] 28170 if x0.Op != OpAMD64MOVBloadidx1 { 28171 break 28172 } 28173 i0 := x0.AuxInt 28174 if x0.Aux != s { 28175 break 28176 } 28177 _ = x0.Args[2] 28178 if p != x0.Args[0] { 28179 break 28180 } 28181 if idx != x0.Args[1] { 28182 break 28183 } 28184 if mem != x0.Args[2] { 28185 break 28186 } 28187 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28188 break 28189 } 28190 b = mergePoint(b, x0, x1) 28191 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28192 v.reset(OpCopy) 28193 v.AddArg(v0) 28194 v0.AuxInt = 8 28195 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28196 v1.AuxInt = i0 28197 v1.Aux = s 28198 v1.AddArg(p) 28199 v1.AddArg(idx) 28200 v1.AddArg(mem) 28201 v0.AddArg(v1) 28202 return true 28203 } 28204 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 28205 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28206 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28207 for { 28208 _ = v.Args[1] 28209 x1 := v.Args[0] 28210 if x1.Op != OpAMD64MOVBloadidx1 { 28211 break 28212 } 28213 i1 := x1.AuxInt 28214 s := x1.Aux 28215 _ = x1.Args[2] 28216 idx := x1.Args[0] 28217 p := x1.Args[1] 28218 mem := x1.Args[2] 28219 sh := v.Args[1] 28220 if sh.Op != OpAMD64SHLQconst { 28221 break 28222 } 28223 if sh.AuxInt != 
8 { 28224 break 28225 } 28226 x0 := sh.Args[0] 28227 if x0.Op != OpAMD64MOVBloadidx1 { 28228 break 28229 } 28230 i0 := x0.AuxInt 28231 if x0.Aux != s { 28232 break 28233 } 28234 _ = x0.Args[2] 28235 if p != x0.Args[0] { 28236 break 28237 } 28238 if idx != x0.Args[1] { 28239 break 28240 } 28241 if mem != x0.Args[2] { 28242 break 28243 } 28244 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28245 break 28246 } 28247 b = mergePoint(b, x0, x1) 28248 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28249 v.reset(OpCopy) 28250 v.AddArg(v0) 28251 v0.AuxInt = 8 28252 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28253 v1.AuxInt = i0 28254 v1.Aux = s 28255 v1.AddArg(p) 28256 v1.AddArg(idx) 28257 v1.AddArg(mem) 28258 v0.AddArg(v1) 28259 return true 28260 } 28261 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 28262 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28263 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28264 for { 28265 _ = v.Args[1] 28266 x1 := v.Args[0] 28267 if x1.Op != OpAMD64MOVBloadidx1 { 28268 break 28269 } 28270 i1 := x1.AuxInt 28271 s := x1.Aux 28272 _ = x1.Args[2] 28273 p := x1.Args[0] 28274 idx := x1.Args[1] 28275 mem := x1.Args[2] 28276 sh := v.Args[1] 28277 if sh.Op != OpAMD64SHLQconst { 28278 break 28279 } 28280 if sh.AuxInt != 8 { 28281 break 28282 } 28283 x0 := sh.Args[0] 28284 if x0.Op != OpAMD64MOVBloadidx1 { 28285 break 28286 } 28287 i0 := x0.AuxInt 28288 if x0.Aux != s { 28289 break 28290 } 28291 _ = x0.Args[2] 28292 if idx != x0.Args[0] { 28293 break 28294 } 28295 if p != x0.Args[1] { 28296 break 28297 } 28298 if mem != x0.Args[2] { 28299 break 28300 } 28301 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28302 break 28303 } 28304 b = mergePoint(b, x0, x1) 28305 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28306 v.reset(OpCopy) 28307 v.AddArg(v0) 28308 v0.AuxInt = 8 28309 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28310 v1.AuxInt = i0 28311 v1.Aux = s 28312 v1.AddArg(p) 28313 v1.AddArg(idx) 28314 v1.AddArg(mem) 28315 v0.AddArg(v1) 28316 return true 28317 } 28318 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 28319 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28320 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28321 for { 28322 _ = v.Args[1] 28323 x1 := v.Args[0] 28324 if x1.Op != OpAMD64MOVBloadidx1 { 28325 break 28326 } 28327 i1 := x1.AuxInt 28328 s := x1.Aux 28329 _ = x1.Args[2] 28330 idx := x1.Args[0] 28331 p := x1.Args[1] 28332 mem := x1.Args[2] 28333 sh := v.Args[1] 28334 if sh.Op != OpAMD64SHLQconst { 28335 break 28336 } 28337 if sh.AuxInt != 8 { 28338 break 28339 } 28340 x0 := sh.Args[0] 28341 if x0.Op != OpAMD64MOVBloadidx1 { 28342 break 28343 } 28344 i0 := x0.AuxInt 28345 if x0.Aux != s { 28346 break 28347 } 28348 _ = x0.Args[2] 28349 if idx != x0.Args[0] { 28350 break 28351 } 28352 if p != x0.Args[1] { 28353 break 28354 } 28355 if mem != x0.Args[2] { 28356 break 28357 } 28358 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 
1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28359 break 28360 } 28361 b = mergePoint(b, x0, x1) 28362 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28363 v.reset(OpCopy) 28364 v.AddArg(v0) 28365 v0.AuxInt = 8 28366 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28367 v1.AuxInt = i0 28368 v1.Aux = s 28369 v1.AddArg(p) 28370 v1.AddArg(idx) 28371 v1.AddArg(mem) 28372 v0.AddArg(v1) 28373 return true 28374 } 28375 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 28376 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28377 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28378 for { 28379 _ = v.Args[1] 28380 sh := v.Args[0] 28381 if sh.Op != OpAMD64SHLQconst { 28382 break 28383 } 28384 if sh.AuxInt != 8 { 28385 break 28386 } 28387 x0 := sh.Args[0] 28388 if x0.Op != OpAMD64MOVBloadidx1 { 28389 break 28390 } 28391 i0 := x0.AuxInt 28392 s := x0.Aux 28393 _ = x0.Args[2] 28394 p := x0.Args[0] 28395 idx := x0.Args[1] 28396 mem := x0.Args[2] 28397 x1 := v.Args[1] 28398 if x1.Op != OpAMD64MOVBloadidx1 { 28399 break 28400 } 28401 i1 := x1.AuxInt 28402 if x1.Aux != s { 28403 break 28404 } 28405 _ = x1.Args[2] 28406 if p != x1.Args[0] { 28407 break 28408 } 28409 if idx != x1.Args[1] { 28410 break 28411 } 28412 if mem != x1.Args[2] { 28413 break 28414 } 28415 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28416 break 28417 } 28418 b = mergePoint(b, x0, x1) 28419 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28420 v.reset(OpCopy) 28421 v.AddArg(v0) 28422 v0.AuxInt = 8 28423 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28424 v1.AuxInt = i0 28425 v1.Aux = s 28426 v1.AddArg(p) 28427 v1.AddArg(idx) 28428 v1.AddArg(mem) 28429 v0.AddArg(v1) 28430 return true 28431 } 28432 return false 28433 } 28434 func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool { 28435 b := v.Block 28436 _ = b 28437 typ := &b.Func.Config.Types 28438 _ = typ 28439 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 28440 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28441 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28442 for { 28443 _ = v.Args[1] 28444 sh := v.Args[0] 28445 if sh.Op != OpAMD64SHLQconst { 28446 break 28447 } 28448 if sh.AuxInt != 8 { 28449 break 28450 } 28451 x0 := sh.Args[0] 28452 if x0.Op != OpAMD64MOVBloadidx1 { 28453 break 28454 } 28455 i0 := x0.AuxInt 28456 s := x0.Aux 28457 _ = x0.Args[2] 28458 idx := x0.Args[0] 28459 p := x0.Args[1] 28460 mem := x0.Args[2] 28461 x1 := v.Args[1] 28462 if x1.Op != OpAMD64MOVBloadidx1 { 28463 break 28464 } 28465 i1 := x1.AuxInt 28466 if x1.Aux != s { 28467 break 28468 } 28469 _ = x1.Args[2] 28470 if p != x1.Args[0] { 28471 break 28472 } 28473 if idx != x1.Args[1] { 28474 break 28475 } 28476 if mem != x1.Args[2] { 28477 break 28478 } 28479 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28480 break 28481 } 28482 b = mergePoint(b, x0, x1) 28483 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28484 v.reset(OpCopy) 28485 v.AddArg(v0) 28486 
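// v now forwards to v0, which is materialized in the merge-point block; the rotate amount and the merged 16-bit indexed load are filled in below.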
v0.AuxInt = 8 28487 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28488 v1.AuxInt = i0 28489 v1.Aux = s 28490 v1.AddArg(p) 28491 v1.AddArg(idx) 28492 v1.AddArg(mem) 28493 v0.AddArg(v1) 28494 return true 28495 } 28496 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 28497 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28498 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28499 for { 28500 _ = v.Args[1] 28501 sh := v.Args[0] 28502 if sh.Op != OpAMD64SHLQconst { 28503 break 28504 } 28505 if sh.AuxInt != 8 { 28506 break 28507 } 28508 x0 := sh.Args[0] 28509 if x0.Op != OpAMD64MOVBloadidx1 { 28510 break 28511 } 28512 i0 := x0.AuxInt 28513 s := x0.Aux 28514 _ = x0.Args[2] 28515 p := x0.Args[0] 28516 idx := x0.Args[1] 28517 mem := x0.Args[2] 28518 x1 := v.Args[1] 28519 if x1.Op != OpAMD64MOVBloadidx1 { 28520 break 28521 } 28522 i1 := x1.AuxInt 28523 if x1.Aux != s { 28524 break 28525 } 28526 _ = x1.Args[2] 28527 if idx != x1.Args[0] { 28528 break 28529 } 28530 if p != x1.Args[1] { 28531 break 28532 } 28533 if mem != x1.Args[2] { 28534 break 28535 } 28536 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28537 break 28538 } 28539 b = mergePoint(b, x0, x1) 28540 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28541 v.reset(OpCopy) 28542 v.AddArg(v0) 28543 v0.AuxInt = 8 28544 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28545 v1.AuxInt = i0 28546 v1.Aux = s 28547 v1.AddArg(p) 28548 v1.AddArg(idx) 28549 v1.AddArg(mem) 28550 v0.AddArg(v1) 28551 return true 28552 } 28553 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 28554 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 28555 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 28556 for { 28557 _ = v.Args[1] 28558 sh := v.Args[0] 28559 if sh.Op != OpAMD64SHLQconst { 28560 break 28561 } 28562 if sh.AuxInt != 8 { 28563 break 28564 } 28565 x0 := sh.Args[0] 28566 if x0.Op != OpAMD64MOVBloadidx1 { 28567 break 28568 } 28569 i0 := x0.AuxInt 28570 s := x0.Aux 28571 _ = x0.Args[2] 28572 idx := x0.Args[0] 28573 p := x0.Args[1] 28574 mem := x0.Args[2] 28575 x1 := v.Args[1] 28576 if x1.Op != OpAMD64MOVBloadidx1 { 28577 break 28578 } 28579 i1 := x1.AuxInt 28580 if x1.Aux != s { 28581 break 28582 } 28583 _ = x1.Args[2] 28584 if idx != x1.Args[0] { 28585 break 28586 } 28587 if p != x1.Args[1] { 28588 break 28589 } 28590 if mem != x1.Args[2] { 28591 break 28592 } 28593 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 28594 break 28595 } 28596 b = mergePoint(b, x0, x1) 28597 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 28598 v.reset(OpCopy) 28599 v.AddArg(v0) 28600 v0.AuxInt = 8 28601 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 28602 v1.AuxInt = i0 28603 v1.Aux = s 28604 v1.AddArg(p) 28605 v1.AddArg(idx) 28606 v1.AddArg(mem) 28607 v0.AddArg(v1) 28608 return true 28609 } 28610 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 28611 // cond: i1 == i0+2 && 
x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28612 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28613 for { 28614 _ = v.Args[1] 28615 r1 := v.Args[0] 28616 if r1.Op != OpAMD64ROLWconst { 28617 break 28618 } 28619 if r1.AuxInt != 8 { 28620 break 28621 } 28622 x1 := r1.Args[0] 28623 if x1.Op != OpAMD64MOVWloadidx1 { 28624 break 28625 } 28626 i1 := x1.AuxInt 28627 s := x1.Aux 28628 _ = x1.Args[2] 28629 p := x1.Args[0] 28630 idx := x1.Args[1] 28631 mem := x1.Args[2] 28632 sh := v.Args[1] 28633 if sh.Op != OpAMD64SHLQconst { 28634 break 28635 } 28636 if sh.AuxInt != 16 { 28637 break 28638 } 28639 r0 := sh.Args[0] 28640 if r0.Op != OpAMD64ROLWconst { 28641 break 28642 } 28643 if r0.AuxInt != 8 { 28644 break 28645 } 28646 x0 := r0.Args[0] 28647 if x0.Op != OpAMD64MOVWloadidx1 { 28648 break 28649 } 28650 i0 := x0.AuxInt 28651 if x0.Aux != s { 28652 break 28653 } 28654 _ = x0.Args[2] 28655 if p != x0.Args[0] { 28656 break 28657 } 28658 if idx != x0.Args[1] { 28659 break 28660 } 28661 if mem != x0.Args[2] { 28662 break 28663 } 28664 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28665 break 28666 } 28667 b = mergePoint(b, x0, x1) 28668 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28669 v.reset(OpCopy) 28670 v.AddArg(v0) 28671 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28672 v1.AuxInt = i0 28673 v1.Aux = s 28674 v1.AddArg(p) 28675 v1.AddArg(idx) 28676 v1.AddArg(mem) 28677 v0.AddArg(v1) 28678 return true 28679 } 28680 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 28681 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28682 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28683 for { 28684 _ = v.Args[1] 28685 r1 := v.Args[0] 28686 if r1.Op != OpAMD64ROLWconst { 28687 break 28688 } 28689 if r1.AuxInt != 8 { 28690 break 28691 } 28692 x1 := r1.Args[0] 28693 if x1.Op != OpAMD64MOVWloadidx1 { 28694 break 28695 } 28696 i1 := x1.AuxInt 28697 s := x1.Aux 28698 _ = x1.Args[2] 28699 idx := x1.Args[0] 28700 p := x1.Args[1] 28701 mem := x1.Args[2] 28702 sh := v.Args[1] 28703 if sh.Op != OpAMD64SHLQconst { 28704 break 28705 } 28706 if sh.AuxInt != 16 { 28707 break 28708 } 28709 r0 := sh.Args[0] 28710 if r0.Op != OpAMD64ROLWconst { 28711 break 28712 } 28713 if r0.AuxInt != 8 { 28714 break 28715 } 28716 x0 := r0.Args[0] 28717 if x0.Op != OpAMD64MOVWloadidx1 { 28718 break 28719 } 28720 i0 := x0.AuxInt 28721 if x0.Aux != s { 28722 break 28723 } 28724 _ = x0.Args[2] 28725 if p != x0.Args[0] { 28726 break 28727 } 28728 if idx != x0.Args[1] { 28729 break 28730 } 28731 if mem != x0.Args[2] { 28732 break 28733 } 28734 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28735 break 28736 } 28737 b = mergePoint(b, x0, x1) 28738 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28739 v.reset(OpCopy) 28740 v.AddArg(v0) 28741 v1 := 
b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28742 v1.AuxInt = i0 28743 v1.Aux = s 28744 v1.AddArg(p) 28745 v1.AddArg(idx) 28746 v1.AddArg(mem) 28747 v0.AddArg(v1) 28748 return true 28749 } 28750 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28751 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28752 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28753 for { 28754 _ = v.Args[1] 28755 r1 := v.Args[0] 28756 if r1.Op != OpAMD64ROLWconst { 28757 break 28758 } 28759 if r1.AuxInt != 8 { 28760 break 28761 } 28762 x1 := r1.Args[0] 28763 if x1.Op != OpAMD64MOVWloadidx1 { 28764 break 28765 } 28766 i1 := x1.AuxInt 28767 s := x1.Aux 28768 _ = x1.Args[2] 28769 p := x1.Args[0] 28770 idx := x1.Args[1] 28771 mem := x1.Args[2] 28772 sh := v.Args[1] 28773 if sh.Op != OpAMD64SHLQconst { 28774 break 28775 } 28776 if sh.AuxInt != 16 { 28777 break 28778 } 28779 r0 := sh.Args[0] 28780 if r0.Op != OpAMD64ROLWconst { 28781 break 28782 } 28783 if r0.AuxInt != 8 { 28784 break 28785 } 28786 x0 := r0.Args[0] 28787 if x0.Op != OpAMD64MOVWloadidx1 { 28788 break 28789 } 28790 i0 := x0.AuxInt 28791 if x0.Aux != s { 28792 break 28793 } 28794 _ = x0.Args[2] 28795 if idx != x0.Args[0] { 28796 break 28797 } 28798 if p != x0.Args[1] { 28799 break 28800 } 28801 if mem != x0.Args[2] { 28802 break 28803 } 28804 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28805 break 28806 } 28807 b = mergePoint(b, x0, x1) 28808 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28809 v.reset(OpCopy) 28810 v.AddArg(v0) 28811 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28812 v1.AuxInt = i0 28813 v1.Aux = s 28814 v1.AddArg(p) 28815 v1.AddArg(idx) 28816 v1.AddArg(mem) 28817 v0.AddArg(v1) 28818 return true 28819 } 28820 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28821 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28822 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28823 for { 28824 _ = v.Args[1] 28825 r1 := v.Args[0] 28826 if r1.Op != OpAMD64ROLWconst { 28827 break 28828 } 28829 if r1.AuxInt != 8 { 28830 break 28831 } 28832 x1 := r1.Args[0] 28833 if x1.Op != OpAMD64MOVWloadidx1 { 28834 break 28835 } 28836 i1 := x1.AuxInt 28837 s := x1.Aux 28838 _ = x1.Args[2] 28839 idx := x1.Args[0] 28840 p := x1.Args[1] 28841 mem := x1.Args[2] 28842 sh := v.Args[1] 28843 if sh.Op != OpAMD64SHLQconst { 28844 break 28845 } 28846 if sh.AuxInt != 16 { 28847 break 28848 } 28849 r0 := sh.Args[0] 28850 if r0.Op != OpAMD64ROLWconst { 28851 break 28852 } 28853 if r0.AuxInt != 8 { 28854 break 28855 } 28856 x0 := r0.Args[0] 28857 if x0.Op != OpAMD64MOVWloadidx1 { 28858 break 28859 } 28860 i0 := x0.AuxInt 28861 if x0.Aux != s { 28862 break 28863 } 28864 _ = x0.Args[2] 28865 if idx != x0.Args[0] { 28866 break 28867 } 28868 if p != x0.Args[1] { 28869 break 28870 } 28871 if mem != x0.Args[2] { 28872 break 28873 } 28874 
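// Shape checks are done; the side conditions below additionally require adjacent offsets (i1 == i0+2), single-use intermediates, and a non-nil merge point before the pair of swapped word loads collapses into BSWAPL of one MOVLloadidx1.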
if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28875 break 28876 } 28877 b = mergePoint(b, x0, x1) 28878 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28879 v.reset(OpCopy) 28880 v.AddArg(v0) 28881 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28882 v1.AuxInt = i0 28883 v1.Aux = s 28884 v1.AddArg(p) 28885 v1.AddArg(idx) 28886 v1.AddArg(mem) 28887 v0.AddArg(v1) 28888 return true 28889 } 28890 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28891 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28892 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28893 for { 28894 _ = v.Args[1] 28895 sh := v.Args[0] 28896 if sh.Op != OpAMD64SHLQconst { 28897 break 28898 } 28899 if sh.AuxInt != 16 { 28900 break 28901 } 28902 r0 := sh.Args[0] 28903 if r0.Op != OpAMD64ROLWconst { 28904 break 28905 } 28906 if r0.AuxInt != 8 { 28907 break 28908 } 28909 x0 := r0.Args[0] 28910 if x0.Op != OpAMD64MOVWloadidx1 { 28911 break 28912 } 28913 i0 := x0.AuxInt 28914 s := x0.Aux 28915 _ = x0.Args[2] 28916 p := x0.Args[0] 28917 idx := x0.Args[1] 28918 mem := x0.Args[2] 28919 r1 := v.Args[1] 28920 if r1.Op != OpAMD64ROLWconst { 28921 break 28922 } 28923 if r1.AuxInt != 8 { 28924 break 28925 } 28926 x1 := r1.Args[0] 28927 if x1.Op != OpAMD64MOVWloadidx1 { 28928 break 28929 } 28930 i1 := x1.AuxInt 28931 if x1.Aux != s { 28932 break 28933 } 28934 _ = x1.Args[2] 28935 if p != x1.Args[0] { 28936 break 28937 } 28938 if idx != x1.Args[1] { 28939 break 28940 } 28941 if mem != x1.Args[2] { 28942 break 28943 } 28944 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 28945 break 28946 } 28947 b = mergePoint(b, x0, x1) 28948 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 28949 v.reset(OpCopy) 28950 v.AddArg(v0) 28951 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28952 v1.AuxInt = i0 28953 v1.Aux = s 28954 v1.AddArg(p) 28955 v1.AddArg(idx) 28956 v1.AddArg(mem) 28957 v0.AddArg(v1) 28958 return true 28959 } 28960 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28961 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 28962 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 28963 for { 28964 _ = v.Args[1] 28965 sh := v.Args[0] 28966 if sh.Op != OpAMD64SHLQconst { 28967 break 28968 } 28969 if sh.AuxInt != 16 { 28970 break 28971 } 28972 r0 := sh.Args[0] 28973 if r0.Op != OpAMD64ROLWconst { 28974 break 28975 } 28976 if r0.AuxInt != 8 { 28977 break 28978 } 28979 x0 := r0.Args[0] 28980 if x0.Op != OpAMD64MOVWloadidx1 { 28981 break 28982 } 28983 i0 := x0.AuxInt 28984 s := x0.Aux 28985 _ = x0.Args[2] 28986 idx := x0.Args[0] 28987 p := x0.Args[1] 28988 mem := x0.Args[2] 28989 r1 := v.Args[1] 28990 if r1.Op != OpAMD64ROLWconst { 28991 break 28992 } 
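// The low half must likewise be the canonical byte-swap idiom: ROLWconst [8] wrapping a single 16-bit indexed load.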
28993 if r1.AuxInt != 8 { 28994 break 28995 } 28996 x1 := r1.Args[0] 28997 if x1.Op != OpAMD64MOVWloadidx1 { 28998 break 28999 } 29000 i1 := x1.AuxInt 29001 if x1.Aux != s { 29002 break 29003 } 29004 _ = x1.Args[2] 29005 if p != x1.Args[0] { 29006 break 29007 } 29008 if idx != x1.Args[1] { 29009 break 29010 } 29011 if mem != x1.Args[2] { 29012 break 29013 } 29014 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29015 break 29016 } 29017 b = mergePoint(b, x0, x1) 29018 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29019 v.reset(OpCopy) 29020 v.AddArg(v0) 29021 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29022 v1.AuxInt = i0 29023 v1.Aux = s 29024 v1.AddArg(p) 29025 v1.AddArg(idx) 29026 v1.AddArg(mem) 29027 v0.AddArg(v1) 29028 return true 29029 } 29030 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 29031 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29032 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29033 for { 29034 _ = v.Args[1] 29035 sh := v.Args[0] 29036 if sh.Op != OpAMD64SHLQconst { 29037 break 29038 } 29039 if sh.AuxInt != 16 { 29040 break 29041 } 29042 r0 := sh.Args[0] 29043 if r0.Op != OpAMD64ROLWconst { 29044 break 29045 } 29046 if r0.AuxInt != 8 { 29047 break 29048 } 29049 x0 := r0.Args[0] 29050 if x0.Op != OpAMD64MOVWloadidx1 { 29051 break 29052 } 29053 i0 := x0.AuxInt 29054 s := x0.Aux 29055 _ = x0.Args[2] 29056 p := x0.Args[0] 29057 idx := x0.Args[1] 29058 mem := x0.Args[2] 29059 r1 := v.Args[1] 29060 if r1.Op != OpAMD64ROLWconst { 29061 break 29062 } 29063 if r1.AuxInt != 8 { 29064 break 29065 } 29066 x1 := r1.Args[0] 29067 if x1.Op != OpAMD64MOVWloadidx1 { 29068 break 29069 } 29070 i1 := x1.AuxInt 29071 if x1.Aux != s { 29072 break 29073 } 29074 _ = x1.Args[2] 29075 if idx != x1.Args[0] { 29076 break 29077 } 29078 if p != x1.Args[1] { 29079 break 29080 } 29081 if mem != x1.Args[2] { 29082 break 29083 } 29084 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29085 break 29086 } 29087 b = mergePoint(b, x0, x1) 29088 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29089 v.reset(OpCopy) 29090 v.AddArg(v0) 29091 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29092 v1.AuxInt = i0 29093 v1.Aux = s 29094 v1.AddArg(p) 29095 v1.AddArg(idx) 29096 v1.AddArg(mem) 29097 v0.AddArg(v1) 29098 return true 29099 } 29100 return false 29101 } 29102 func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool { 29103 b := v.Block 29104 _ = b 29105 typ := &b.Func.Config.Types 29106 _ = typ 29107 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 29108 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29109 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 29110 for { 29111 _ = v.Args[1] 29112 sh := 
v.Args[0] 29113 if sh.Op != OpAMD64SHLQconst { 29114 break 29115 } 29116 if sh.AuxInt != 16 { 29117 break 29118 } 29119 r0 := sh.Args[0] 29120 if r0.Op != OpAMD64ROLWconst { 29121 break 29122 } 29123 if r0.AuxInt != 8 { 29124 break 29125 } 29126 x0 := r0.Args[0] 29127 if x0.Op != OpAMD64MOVWloadidx1 { 29128 break 29129 } 29130 i0 := x0.AuxInt 29131 s := x0.Aux 29132 _ = x0.Args[2] 29133 idx := x0.Args[0] 29134 p := x0.Args[1] 29135 mem := x0.Args[2] 29136 r1 := v.Args[1] 29137 if r1.Op != OpAMD64ROLWconst { 29138 break 29139 } 29140 if r1.AuxInt != 8 { 29141 break 29142 } 29143 x1 := r1.Args[0] 29144 if x1.Op != OpAMD64MOVWloadidx1 { 29145 break 29146 } 29147 i1 := x1.AuxInt 29148 if x1.Aux != s { 29149 break 29150 } 29151 _ = x1.Args[2] 29152 if idx != x1.Args[0] { 29153 break 29154 } 29155 if p != x1.Args[1] { 29156 break 29157 } 29158 if mem != x1.Args[2] { 29159 break 29160 } 29161 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29162 break 29163 } 29164 b = mergePoint(b, x0, x1) 29165 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29166 v.reset(OpCopy) 29167 v.AddArg(v0) 29168 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29169 v1.AuxInt = i0 29170 v1.Aux = s 29171 v1.AddArg(p) 29172 v1.AddArg(idx) 29173 v1.AddArg(mem) 29174 v0.AddArg(v1) 29175 return true 29176 } 29177 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) 29178 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29179 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29180 for { 29181 _ = v.Args[1] 29182 r1 := v.Args[0] 29183 if r1.Op != OpAMD64BSWAPL { 29184 break 29185 } 29186 x1 := r1.Args[0] 29187 if x1.Op != OpAMD64MOVLloadidx1 { 29188 break 29189 } 29190 i1 := x1.AuxInt 29191 s := x1.Aux 29192 _ = x1.Args[2] 29193 p := x1.Args[0] 29194 idx := x1.Args[1] 29195 mem := x1.Args[2] 29196 sh := v.Args[1] 29197 if sh.Op != OpAMD64SHLQconst { 29198 break 29199 } 29200 if sh.AuxInt != 32 { 29201 break 29202 } 29203 r0 := sh.Args[0] 29204 if r0.Op != OpAMD64BSWAPL { 29205 break 29206 } 29207 x0 := r0.Args[0] 29208 if x0.Op != OpAMD64MOVLloadidx1 { 29209 break 29210 } 29211 i0 := x0.AuxInt 29212 if x0.Aux != s { 29213 break 29214 } 29215 _ = x0.Args[2] 29216 if p != x0.Args[0] { 29217 break 29218 } 29219 if idx != x0.Args[1] { 29220 break 29221 } 29222 if mem != x0.Args[2] { 29223 break 29224 } 29225 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29226 break 29227 } 29228 b = mergePoint(b, x0, x1) 29229 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29230 v.reset(OpCopy) 29231 v.AddArg(v0) 29232 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29233 v1.AuxInt = i0 29234 v1.Aux = s 29235 v1.AddArg(p) 29236 v1.AddArg(idx) 29237 v1.AddArg(mem) 29238 v0.AddArg(v1) 29239 return true 29240 } 29241 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) 29242 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 
&& mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29243 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29244 for { 29245 _ = v.Args[1] 29246 r1 := v.Args[0] 29247 if r1.Op != OpAMD64BSWAPL { 29248 break 29249 } 29250 x1 := r1.Args[0] 29251 if x1.Op != OpAMD64MOVLloadidx1 { 29252 break 29253 } 29254 i1 := x1.AuxInt 29255 s := x1.Aux 29256 _ = x1.Args[2] 29257 idx := x1.Args[0] 29258 p := x1.Args[1] 29259 mem := x1.Args[2] 29260 sh := v.Args[1] 29261 if sh.Op != OpAMD64SHLQconst { 29262 break 29263 } 29264 if sh.AuxInt != 32 { 29265 break 29266 } 29267 r0 := sh.Args[0] 29268 if r0.Op != OpAMD64BSWAPL { 29269 break 29270 } 29271 x0 := r0.Args[0] 29272 if x0.Op != OpAMD64MOVLloadidx1 { 29273 break 29274 } 29275 i0 := x0.AuxInt 29276 if x0.Aux != s { 29277 break 29278 } 29279 _ = x0.Args[2] 29280 if p != x0.Args[0] { 29281 break 29282 } 29283 if idx != x0.Args[1] { 29284 break 29285 } 29286 if mem != x0.Args[2] { 29287 break 29288 } 29289 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29290 break 29291 } 29292 b = mergePoint(b, x0, x1) 29293 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29294 v.reset(OpCopy) 29295 v.AddArg(v0) 29296 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29297 v1.AuxInt = i0 29298 v1.Aux = s 29299 v1.AddArg(p) 29300 v1.AddArg(idx) 29301 v1.AddArg(mem) 29302 v0.AddArg(v1) 29303 return true 29304 } 29305 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 29306 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29307 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29308 for { 29309 _ = v.Args[1] 29310 r1 := v.Args[0] 29311 if r1.Op != OpAMD64BSWAPL { 29312 break 29313 } 29314 x1 := r1.Args[0] 29315 if x1.Op != OpAMD64MOVLloadidx1 { 29316 break 29317 } 29318 i1 := x1.AuxInt 29319 s := x1.Aux 29320 _ = x1.Args[2] 29321 p := x1.Args[0] 29322 idx := x1.Args[1] 29323 mem := x1.Args[2] 29324 sh := v.Args[1] 29325 if sh.Op != OpAMD64SHLQconst { 29326 break 29327 } 29328 if sh.AuxInt != 32 { 29329 break 29330 } 29331 r0 := sh.Args[0] 29332 if r0.Op != OpAMD64BSWAPL { 29333 break 29334 } 29335 x0 := r0.Args[0] 29336 if x0.Op != OpAMD64MOVLloadidx1 { 29337 break 29338 } 29339 i0 := x0.AuxInt 29340 if x0.Aux != s { 29341 break 29342 } 29343 _ = x0.Args[2] 29344 if idx != x0.Args[0] { 29345 break 29346 } 29347 if p != x0.Args[1] { 29348 break 29349 } 29350 if mem != x0.Args[2] { 29351 break 29352 } 29353 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29354 break 29355 } 29356 b = mergePoint(b, x0, x1) 29357 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29358 v.reset(OpCopy) 29359 v.AddArg(v0) 29360 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29361 v1.AuxInt = i0 29362 v1.Aux = s 29363 v1.AddArg(p) 29364 v1.AddArg(idx) 29365 v1.AddArg(mem) 29366 v0.AddArg(v1) 29367 return true 29368 } 29369 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL 
x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 29370 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29371 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29372 for { 29373 _ = v.Args[1] 29374 r1 := v.Args[0] 29375 if r1.Op != OpAMD64BSWAPL { 29376 break 29377 } 29378 x1 := r1.Args[0] 29379 if x1.Op != OpAMD64MOVLloadidx1 { 29380 break 29381 } 29382 i1 := x1.AuxInt 29383 s := x1.Aux 29384 _ = x1.Args[2] 29385 idx := x1.Args[0] 29386 p := x1.Args[1] 29387 mem := x1.Args[2] 29388 sh := v.Args[1] 29389 if sh.Op != OpAMD64SHLQconst { 29390 break 29391 } 29392 if sh.AuxInt != 32 { 29393 break 29394 } 29395 r0 := sh.Args[0] 29396 if r0.Op != OpAMD64BSWAPL { 29397 break 29398 } 29399 x0 := r0.Args[0] 29400 if x0.Op != OpAMD64MOVLloadidx1 { 29401 break 29402 } 29403 i0 := x0.AuxInt 29404 if x0.Aux != s { 29405 break 29406 } 29407 _ = x0.Args[2] 29408 if idx != x0.Args[0] { 29409 break 29410 } 29411 if p != x0.Args[1] { 29412 break 29413 } 29414 if mem != x0.Args[2] { 29415 break 29416 } 29417 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29418 break 29419 } 29420 b = mergePoint(b, x0, x1) 29421 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29422 v.reset(OpCopy) 29423 v.AddArg(v0) 29424 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29425 v1.AuxInt = i0 29426 v1.Aux = s 29427 v1.AddArg(p) 29428 v1.AddArg(idx) 29429 v1.AddArg(mem) 29430 v0.AddArg(v1) 29431 return true 29432 } 29433 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 29434 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29435 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29436 for { 29437 _ = v.Args[1] 29438 sh := v.Args[0] 29439 if sh.Op != OpAMD64SHLQconst { 29440 break 29441 } 29442 if sh.AuxInt != 32 { 29443 break 29444 } 29445 r0 := sh.Args[0] 29446 if r0.Op != OpAMD64BSWAPL { 29447 break 29448 } 29449 x0 := r0.Args[0] 29450 if x0.Op != OpAMD64MOVLloadidx1 { 29451 break 29452 } 29453 i0 := x0.AuxInt 29454 s := x0.Aux 29455 _ = x0.Args[2] 29456 p := x0.Args[0] 29457 idx := x0.Args[1] 29458 mem := x0.Args[2] 29459 r1 := v.Args[1] 29460 if r1.Op != OpAMD64BSWAPL { 29461 break 29462 } 29463 x1 := r1.Args[0] 29464 if x1.Op != OpAMD64MOVLloadidx1 { 29465 break 29466 } 29467 i1 := x1.AuxInt 29468 if x1.Aux != s { 29469 break 29470 } 29471 _ = x1.Args[2] 29472 if p != x1.Args[0] { 29473 break 29474 } 29475 if idx != x1.Args[1] { 29476 break 29477 } 29478 if mem != x1.Args[2] { 29479 break 29480 } 29481 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29482 break 29483 } 29484 b = mergePoint(b, x0, x1) 29485 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29486 v.reset(OpCopy) 29487 v.AddArg(v0) 29488 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29489 v1.AuxInt = i0 29490 v1.Aux = s 29491 v1.AddArg(p) 29492 v1.AddArg(idx) 29493 v1.AddArg(mem) 
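// Both swapped 4-byte halves are replaced by one BSWAPQ over the single 8-byte indexed load built here.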
29494 v0.AddArg(v1) 29495 return true 29496 } 29497 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 29498 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29499 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29500 for { 29501 _ = v.Args[1] 29502 sh := v.Args[0] 29503 if sh.Op != OpAMD64SHLQconst { 29504 break 29505 } 29506 if sh.AuxInt != 32 { 29507 break 29508 } 29509 r0 := sh.Args[0] 29510 if r0.Op != OpAMD64BSWAPL { 29511 break 29512 } 29513 x0 := r0.Args[0] 29514 if x0.Op != OpAMD64MOVLloadidx1 { 29515 break 29516 } 29517 i0 := x0.AuxInt 29518 s := x0.Aux 29519 _ = x0.Args[2] 29520 idx := x0.Args[0] 29521 p := x0.Args[1] 29522 mem := x0.Args[2] 29523 r1 := v.Args[1] 29524 if r1.Op != OpAMD64BSWAPL { 29525 break 29526 } 29527 x1 := r1.Args[0] 29528 if x1.Op != OpAMD64MOVLloadidx1 { 29529 break 29530 } 29531 i1 := x1.AuxInt 29532 if x1.Aux != s { 29533 break 29534 } 29535 _ = x1.Args[2] 29536 if p != x1.Args[0] { 29537 break 29538 } 29539 if idx != x1.Args[1] { 29540 break 29541 } 29542 if mem != x1.Args[2] { 29543 break 29544 } 29545 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29546 break 29547 } 29548 b = mergePoint(b, x0, x1) 29549 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29550 v.reset(OpCopy) 29551 v.AddArg(v0) 29552 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29553 v1.AuxInt = i0 29554 v1.Aux = s 29555 v1.AddArg(p) 29556 v1.AddArg(idx) 29557 v1.AddArg(mem) 29558 v0.AddArg(v1) 29559 return true 29560 } 29561 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) 29562 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29563 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29564 for { 29565 _ = v.Args[1] 29566 sh := v.Args[0] 29567 if sh.Op != OpAMD64SHLQconst { 29568 break 29569 } 29570 if sh.AuxInt != 32 { 29571 break 29572 } 29573 r0 := sh.Args[0] 29574 if r0.Op != OpAMD64BSWAPL { 29575 break 29576 } 29577 x0 := r0.Args[0] 29578 if x0.Op != OpAMD64MOVLloadidx1 { 29579 break 29580 } 29581 i0 := x0.AuxInt 29582 s := x0.Aux 29583 _ = x0.Args[2] 29584 p := x0.Args[0] 29585 idx := x0.Args[1] 29586 mem := x0.Args[2] 29587 r1 := v.Args[1] 29588 if r1.Op != OpAMD64BSWAPL { 29589 break 29590 } 29591 x1 := r1.Args[0] 29592 if x1.Op != OpAMD64MOVLloadidx1 { 29593 break 29594 } 29595 i1 := x1.AuxInt 29596 if x1.Aux != s { 29597 break 29598 } 29599 _ = x1.Args[2] 29600 if idx != x1.Args[0] { 29601 break 29602 } 29603 if p != x1.Args[1] { 29604 break 29605 } 29606 if mem != x1.Args[2] { 29607 break 29608 } 29609 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29610 break 29611 } 29612 b = mergePoint(b, x0, x1) 29613 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29614 v.reset(OpCopy) 29615 v.AddArg(v0) 29616 v1 := 
b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29617 v1.AuxInt = i0 29618 v1.Aux = s 29619 v1.AddArg(p) 29620 v1.AddArg(idx) 29621 v1.AddArg(mem) 29622 v0.AddArg(v1) 29623 return true 29624 } 29625 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) 29626 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29627 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 29628 for { 29629 _ = v.Args[1] 29630 sh := v.Args[0] 29631 if sh.Op != OpAMD64SHLQconst { 29632 break 29633 } 29634 if sh.AuxInt != 32 { 29635 break 29636 } 29637 r0 := sh.Args[0] 29638 if r0.Op != OpAMD64BSWAPL { 29639 break 29640 } 29641 x0 := r0.Args[0] 29642 if x0.Op != OpAMD64MOVLloadidx1 { 29643 break 29644 } 29645 i0 := x0.AuxInt 29646 s := x0.Aux 29647 _ = x0.Args[2] 29648 idx := x0.Args[0] 29649 p := x0.Args[1] 29650 mem := x0.Args[2] 29651 r1 := v.Args[1] 29652 if r1.Op != OpAMD64BSWAPL { 29653 break 29654 } 29655 x1 := r1.Args[0] 29656 if x1.Op != OpAMD64MOVLloadidx1 { 29657 break 29658 } 29659 i1 := x1.AuxInt 29660 if x1.Aux != s { 29661 break 29662 } 29663 _ = x1.Args[2] 29664 if idx != x1.Args[0] { 29665 break 29666 } 29667 if p != x1.Args[1] { 29668 break 29669 } 29670 if mem != x1.Args[2] { 29671 break 29672 } 29673 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29674 break 29675 } 29676 b = mergePoint(b, x0, x1) 29677 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29678 v.reset(OpCopy) 29679 v.AddArg(v0) 29680 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 29681 v1.AuxInt = i0 29682 v1.Aux = s 29683 v1.AddArg(p) 29684 v1.AddArg(idx) 29685 v1.AddArg(mem) 29686 v0.AddArg(v1) 29687 return true 29688 } 29689 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 29690 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29691 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29692 for { 29693 _ = v.Args[1] 29694 s0 := v.Args[0] 29695 if s0.Op != OpAMD64SHLQconst { 29696 break 29697 } 29698 j0 := s0.AuxInt 29699 x0 := s0.Args[0] 29700 if x0.Op != OpAMD64MOVBloadidx1 { 29701 break 29702 } 29703 i0 := x0.AuxInt 29704 s := x0.Aux 29705 _ = x0.Args[2] 29706 p := x0.Args[0] 29707 idx := x0.Args[1] 29708 mem := x0.Args[2] 29709 or := v.Args[1] 29710 if or.Op != OpAMD64ORQ { 29711 break 29712 } 29713 _ = or.Args[1] 29714 s1 := or.Args[0] 29715 if s1.Op != OpAMD64SHLQconst { 29716 break 29717 } 29718 j1 := s1.AuxInt 29719 x1 := s1.Args[0] 29720 if x1.Op != OpAMD64MOVBloadidx1 { 29721 break 29722 } 29723 i1 := x1.AuxInt 29724 if x1.Aux != s { 29725 break 29726 } 29727 _ = x1.Args[2] 29728 if p != x1.Args[0] { 29729 break 29730 } 29731 if idx != x1.Args[1] { 29732 break 29733 } 29734 if mem != x1.Args[2] { 29735 break 29736 } 29737 y := or.Args[1] 29738 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 
&& s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29739 break 29740 } 29741 b = mergePoint(b, x0, x1) 29742 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29743 v.reset(OpCopy) 29744 v.AddArg(v0) 29745 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29746 v1.AuxInt = j1 29747 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29748 v2.AuxInt = 8 29749 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29750 v3.AuxInt = i0 29751 v3.Aux = s 29752 v3.AddArg(p) 29753 v3.AddArg(idx) 29754 v3.AddArg(mem) 29755 v2.AddArg(v3) 29756 v1.AddArg(v2) 29757 v0.AddArg(v1) 29758 v0.AddArg(y) 29759 return true 29760 } 29761 return false 29762 } 29763 func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool { 29764 b := v.Block 29765 _ = b 29766 typ := &b.Func.Config.Types 29767 _ = typ 29768 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 29769 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29770 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29771 for { 29772 _ = v.Args[1] 29773 s0 := v.Args[0] 29774 if s0.Op != OpAMD64SHLQconst { 29775 break 29776 } 29777 j0 := s0.AuxInt 29778 x0 := s0.Args[0] 29779 if x0.Op != OpAMD64MOVBloadidx1 { 29780 break 29781 } 29782 i0 := x0.AuxInt 29783 s := x0.Aux 29784 _ = x0.Args[2] 29785 idx := x0.Args[0] 29786 p := x0.Args[1] 29787 mem := x0.Args[2] 29788 or := v.Args[1] 29789 if or.Op != OpAMD64ORQ { 29790 break 29791 } 29792 _ = or.Args[1] 29793 s1 := or.Args[0] 29794 if s1.Op != OpAMD64SHLQconst { 29795 break 29796 } 29797 j1 := s1.AuxInt 29798 x1 := s1.Args[0] 29799 if x1.Op != OpAMD64MOVBloadidx1 { 29800 break 29801 } 29802 i1 := x1.AuxInt 29803 if x1.Aux != s { 29804 break 29805 } 29806 _ = x1.Args[2] 29807 if p != x1.Args[0] { 29808 break 29809 } 29810 if idx != x1.Args[1] { 29811 break 29812 } 29813 if mem != x1.Args[2] { 29814 break 29815 } 29816 y := or.Args[1] 29817 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29818 break 29819 } 29820 b = mergePoint(b, x0, x1) 29821 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29822 v.reset(OpCopy) 29823 v.AddArg(v0) 29824 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29825 v1.AuxInt = j1 29826 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29827 v2.AuxInt = 8 29828 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29829 v3.AuxInt = i0 29830 v3.Aux = s 29831 v3.AddArg(p) 29832 v3.AddArg(idx) 29833 v3.AddArg(mem) 29834 v2.AddArg(v3) 29835 v1.AddArg(v2) 29836 v0.AddArg(v1) 29837 v0.AddArg(y) 29838 return true 29839 } 29840 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 29841 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29842 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst 
<v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29843 for { 29844 _ = v.Args[1] 29845 s0 := v.Args[0] 29846 if s0.Op != OpAMD64SHLQconst { 29847 break 29848 } 29849 j0 := s0.AuxInt 29850 x0 := s0.Args[0] 29851 if x0.Op != OpAMD64MOVBloadidx1 { 29852 break 29853 } 29854 i0 := x0.AuxInt 29855 s := x0.Aux 29856 _ = x0.Args[2] 29857 p := x0.Args[0] 29858 idx := x0.Args[1] 29859 mem := x0.Args[2] 29860 or := v.Args[1] 29861 if or.Op != OpAMD64ORQ { 29862 break 29863 } 29864 _ = or.Args[1] 29865 s1 := or.Args[0] 29866 if s1.Op != OpAMD64SHLQconst { 29867 break 29868 } 29869 j1 := s1.AuxInt 29870 x1 := s1.Args[0] 29871 if x1.Op != OpAMD64MOVBloadidx1 { 29872 break 29873 } 29874 i1 := x1.AuxInt 29875 if x1.Aux != s { 29876 break 29877 } 29878 _ = x1.Args[2] 29879 if idx != x1.Args[0] { 29880 break 29881 } 29882 if p != x1.Args[1] { 29883 break 29884 } 29885 if mem != x1.Args[2] { 29886 break 29887 } 29888 y := or.Args[1] 29889 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29890 break 29891 } 29892 b = mergePoint(b, x0, x1) 29893 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29894 v.reset(OpCopy) 29895 v.AddArg(v0) 29896 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29897 v1.AuxInt = j1 29898 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29899 v2.AuxInt = 8 29900 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29901 v3.AuxInt = i0 29902 v3.Aux = s 29903 v3.AddArg(p) 29904 v3.AddArg(idx) 29905 v3.AddArg(mem) 29906 v2.AddArg(v3) 29907 v1.AddArg(v2) 29908 v0.AddArg(v1) 29909 v0.AddArg(y) 29910 return true 29911 } 29912 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 29913 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29914 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29915 for { 29916 _ = v.Args[1] 29917 s0 := v.Args[0] 29918 if s0.Op != OpAMD64SHLQconst { 29919 break 29920 } 29921 j0 := s0.AuxInt 29922 x0 := s0.Args[0] 29923 if x0.Op != OpAMD64MOVBloadidx1 { 29924 break 29925 } 29926 i0 := x0.AuxInt 29927 s := x0.Aux 29928 _ = x0.Args[2] 29929 idx := x0.Args[0] 29930 p := x0.Args[1] 29931 mem := x0.Args[2] 29932 or := v.Args[1] 29933 if or.Op != OpAMD64ORQ { 29934 break 29935 } 29936 _ = or.Args[1] 29937 s1 := or.Args[0] 29938 if s1.Op != OpAMD64SHLQconst { 29939 break 29940 } 29941 j1 := s1.AuxInt 29942 x1 := s1.Args[0] 29943 if x1.Op != OpAMD64MOVBloadidx1 { 29944 break 29945 } 29946 i1 := x1.AuxInt 29947 if x1.Aux != s { 29948 break 29949 } 29950 _ = x1.Args[2] 29951 if idx != x1.Args[0] { 29952 break 29953 } 29954 if p != x1.Args[1] { 29955 break 29956 } 29957 if mem != x1.Args[2] { 29958 break 29959 } 29960 y := or.Args[1] 29961 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29962 break 29963 } 29964 b = mergePoint(b, x0, x1) 29965 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29966 v.reset(OpCopy) 29967 
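// v becomes a copy of v0, the rebuilt ORQ that pairs the shifted, byte-swapped 16-bit load with the unrelated operand y.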
v.AddArg(v0) 29968 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29969 v1.AuxInt = j1 29970 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29971 v2.AuxInt = 8 29972 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 29973 v3.AuxInt = i0 29974 v3.Aux = s 29975 v3.AddArg(p) 29976 v3.AddArg(idx) 29977 v3.AddArg(mem) 29978 v2.AddArg(v3) 29979 v1.AddArg(v2) 29980 v0.AddArg(v1) 29981 v0.AddArg(y) 29982 return true 29983 } 29984 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 29985 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29986 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 29987 for { 29988 _ = v.Args[1] 29989 s0 := v.Args[0] 29990 if s0.Op != OpAMD64SHLQconst { 29991 break 29992 } 29993 j0 := s0.AuxInt 29994 x0 := s0.Args[0] 29995 if x0.Op != OpAMD64MOVBloadidx1 { 29996 break 29997 } 29998 i0 := x0.AuxInt 29999 s := x0.Aux 30000 _ = x0.Args[2] 30001 p := x0.Args[0] 30002 idx := x0.Args[1] 30003 mem := x0.Args[2] 30004 or := v.Args[1] 30005 if or.Op != OpAMD64ORQ { 30006 break 30007 } 30008 _ = or.Args[1] 30009 y := or.Args[0] 30010 s1 := or.Args[1] 30011 if s1.Op != OpAMD64SHLQconst { 30012 break 30013 } 30014 j1 := s1.AuxInt 30015 x1 := s1.Args[0] 30016 if x1.Op != OpAMD64MOVBloadidx1 { 30017 break 30018 } 30019 i1 := x1.AuxInt 30020 if x1.Aux != s { 30021 break 30022 } 30023 _ = x1.Args[2] 30024 if p != x1.Args[0] { 30025 break 30026 } 30027 if idx != x1.Args[1] { 30028 break 30029 } 30030 if mem != x1.Args[2] { 30031 break 30032 } 30033 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30034 break 30035 } 30036 b = mergePoint(b, x0, x1) 30037 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30038 v.reset(OpCopy) 30039 v.AddArg(v0) 30040 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30041 v1.AuxInt = j1 30042 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30043 v2.AuxInt = 8 30044 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30045 v3.AuxInt = i0 30046 v3.Aux = s 30047 v3.AddArg(p) 30048 v3.AddArg(idx) 30049 v3.AddArg(mem) 30050 v2.AddArg(v3) 30051 v1.AddArg(v2) 30052 v0.AddArg(v1) 30053 v0.AddArg(y) 30054 return true 30055 } 30056 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 30057 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30058 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30059 for { 30060 _ = v.Args[1] 30061 s0 := v.Args[0] 30062 if s0.Op != OpAMD64SHLQconst { 30063 break 30064 } 30065 j0 := s0.AuxInt 30066 x0 := s0.Args[0] 30067 if x0.Op != OpAMD64MOVBloadidx1 { 30068 break 30069 } 30070 i0 := x0.AuxInt 30071 s := x0.Aux 30072 _ = x0.Args[2] 30073 idx := x0.Args[0] 30074 p := x0.Args[1] 30075 mem := x0.Args[2] 30076 or := 
v.Args[1] 30077 if or.Op != OpAMD64ORQ { 30078 break 30079 } 30080 _ = or.Args[1] 30081 y := or.Args[0] 30082 s1 := or.Args[1] 30083 if s1.Op != OpAMD64SHLQconst { 30084 break 30085 } 30086 j1 := s1.AuxInt 30087 x1 := s1.Args[0] 30088 if x1.Op != OpAMD64MOVBloadidx1 { 30089 break 30090 } 30091 i1 := x1.AuxInt 30092 if x1.Aux != s { 30093 break 30094 } 30095 _ = x1.Args[2] 30096 if p != x1.Args[0] { 30097 break 30098 } 30099 if idx != x1.Args[1] { 30100 break 30101 } 30102 if mem != x1.Args[2] { 30103 break 30104 } 30105 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30106 break 30107 } 30108 b = mergePoint(b, x0, x1) 30109 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30110 v.reset(OpCopy) 30111 v.AddArg(v0) 30112 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30113 v1.AuxInt = j1 30114 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30115 v2.AuxInt = 8 30116 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30117 v3.AuxInt = i0 30118 v3.Aux = s 30119 v3.AddArg(p) 30120 v3.AddArg(idx) 30121 v3.AddArg(mem) 30122 v2.AddArg(v3) 30123 v1.AddArg(v2) 30124 v0.AddArg(v1) 30125 v0.AddArg(y) 30126 return true 30127 } 30128 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 30129 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30130 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30131 for { 30132 _ = v.Args[1] 30133 s0 := v.Args[0] 30134 if s0.Op != OpAMD64SHLQconst { 30135 break 30136 } 30137 j0 := s0.AuxInt 30138 x0 := s0.Args[0] 30139 if x0.Op != OpAMD64MOVBloadidx1 { 30140 break 30141 } 30142 i0 := x0.AuxInt 30143 s := x0.Aux 30144 _ = x0.Args[2] 30145 p := x0.Args[0] 30146 idx := x0.Args[1] 30147 mem := x0.Args[2] 30148 or := v.Args[1] 30149 if or.Op != OpAMD64ORQ { 30150 break 30151 } 30152 _ = or.Args[1] 30153 y := or.Args[0] 30154 s1 := or.Args[1] 30155 if s1.Op != OpAMD64SHLQconst { 30156 break 30157 } 30158 j1 := s1.AuxInt 30159 x1 := s1.Args[0] 30160 if x1.Op != OpAMD64MOVBloadidx1 { 30161 break 30162 } 30163 i1 := x1.AuxInt 30164 if x1.Aux != s { 30165 break 30166 } 30167 _ = x1.Args[2] 30168 if idx != x1.Args[0] { 30169 break 30170 } 30171 if p != x1.Args[1] { 30172 break 30173 } 30174 if mem != x1.Args[2] { 30175 break 30176 } 30177 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30178 break 30179 } 30180 b = mergePoint(b, x0, x1) 30181 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30182 v.reset(OpCopy) 30183 v.AddArg(v0) 30184 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30185 v1.AuxInt = j1 30186 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30187 v2.AuxInt = 8 30188 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30189 v3.AuxInt = i0 30190 v3.Aux = s 30191 v3.AddArg(p) 30192 v3.AddArg(idx) 30193 v3.AddArg(mem) 30194 v2.AddArg(v3) 30195 v1.AddArg(v2) 30196 v0.AddArg(v1) 30197 v0.AddArg(y) 30198 return true 30199 } 
30200 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 30201 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30202 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30203 for { 30204 _ = v.Args[1] 30205 s0 := v.Args[0] 30206 if s0.Op != OpAMD64SHLQconst { 30207 break 30208 } 30209 j0 := s0.AuxInt 30210 x0 := s0.Args[0] 30211 if x0.Op != OpAMD64MOVBloadidx1 { 30212 break 30213 } 30214 i0 := x0.AuxInt 30215 s := x0.Aux 30216 _ = x0.Args[2] 30217 idx := x0.Args[0] 30218 p := x0.Args[1] 30219 mem := x0.Args[2] 30220 or := v.Args[1] 30221 if or.Op != OpAMD64ORQ { 30222 break 30223 } 30224 _ = or.Args[1] 30225 y := or.Args[0] 30226 s1 := or.Args[1] 30227 if s1.Op != OpAMD64SHLQconst { 30228 break 30229 } 30230 j1 := s1.AuxInt 30231 x1 := s1.Args[0] 30232 if x1.Op != OpAMD64MOVBloadidx1 { 30233 break 30234 } 30235 i1 := x1.AuxInt 30236 if x1.Aux != s { 30237 break 30238 } 30239 _ = x1.Args[2] 30240 if idx != x1.Args[0] { 30241 break 30242 } 30243 if p != x1.Args[1] { 30244 break 30245 } 30246 if mem != x1.Args[2] { 30247 break 30248 } 30249 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30250 break 30251 } 30252 b = mergePoint(b, x0, x1) 30253 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30254 v.reset(OpCopy) 30255 v.AddArg(v0) 30256 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30257 v1.AuxInt = j1 30258 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30259 v2.AuxInt = 8 30260 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30261 v3.AuxInt = i0 30262 v3.Aux = s 30263 v3.AddArg(p) 30264 v3.AddArg(idx) 30265 v3.AddArg(mem) 30266 v2.AddArg(v3) 30267 v1.AddArg(v2) 30268 v0.AddArg(v1) 30269 v0.AddArg(y) 30270 return true 30271 } 30272 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30273 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30274 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30275 for { 30276 _ = v.Args[1] 30277 or := v.Args[0] 30278 if or.Op != OpAMD64ORQ { 30279 break 30280 } 30281 _ = or.Args[1] 30282 s1 := or.Args[0] 30283 if s1.Op != OpAMD64SHLQconst { 30284 break 30285 } 30286 j1 := s1.AuxInt 30287 x1 := s1.Args[0] 30288 if x1.Op != OpAMD64MOVBloadidx1 { 30289 break 30290 } 30291 i1 := x1.AuxInt 30292 s := x1.Aux 30293 _ = x1.Args[2] 30294 p := x1.Args[0] 30295 idx := x1.Args[1] 30296 mem := x1.Args[2] 30297 y := or.Args[1] 30298 s0 := v.Args[1] 30299 if s0.Op != OpAMD64SHLQconst { 30300 break 30301 } 30302 j0 := s0.AuxInt 30303 x0 := s0.Args[0] 30304 if x0.Op != OpAMD64MOVBloadidx1 { 30305 break 30306 } 30307 i0 := x0.AuxInt 30308 if x0.Aux != s { 30309 break 30310 } 30311 _ = x0.Args[2] 30312 if p != x0.Args[0] { 30313 break 30314 } 30315 if idx != 
x0.Args[1] { 30316 break 30317 } 30318 if mem != x0.Args[2] { 30319 break 30320 } 30321 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30322 break 30323 } 30324 b = mergePoint(b, x0, x1) 30325 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30326 v.reset(OpCopy) 30327 v.AddArg(v0) 30328 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30329 v1.AuxInt = j1 30330 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30331 v2.AuxInt = 8 30332 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30333 v3.AuxInt = i0 30334 v3.Aux = s 30335 v3.AddArg(p) 30336 v3.AddArg(idx) 30337 v3.AddArg(mem) 30338 v2.AddArg(v3) 30339 v1.AddArg(v2) 30340 v0.AddArg(v1) 30341 v0.AddArg(y) 30342 return true 30343 } 30344 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30345 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30346 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30347 for { 30348 _ = v.Args[1] 30349 or := v.Args[0] 30350 if or.Op != OpAMD64ORQ { 30351 break 30352 } 30353 _ = or.Args[1] 30354 s1 := or.Args[0] 30355 if s1.Op != OpAMD64SHLQconst { 30356 break 30357 } 30358 j1 := s1.AuxInt 30359 x1 := s1.Args[0] 30360 if x1.Op != OpAMD64MOVBloadidx1 { 30361 break 30362 } 30363 i1 := x1.AuxInt 30364 s := x1.Aux 30365 _ = x1.Args[2] 30366 idx := x1.Args[0] 30367 p := x1.Args[1] 30368 mem := x1.Args[2] 30369 y := or.Args[1] 30370 s0 := v.Args[1] 30371 if s0.Op != OpAMD64SHLQconst { 30372 break 30373 } 30374 j0 := s0.AuxInt 30375 x0 := s0.Args[0] 30376 if x0.Op != OpAMD64MOVBloadidx1 { 30377 break 30378 } 30379 i0 := x0.AuxInt 30380 if x0.Aux != s { 30381 break 30382 } 30383 _ = x0.Args[2] 30384 if p != x0.Args[0] { 30385 break 30386 } 30387 if idx != x0.Args[1] { 30388 break 30389 } 30390 if mem != x0.Args[2] { 30391 break 30392 } 30393 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30394 break 30395 } 30396 b = mergePoint(b, x0, x1) 30397 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30398 v.reset(OpCopy) 30399 v.AddArg(v0) 30400 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30401 v1.AuxInt = j1 30402 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30403 v2.AuxInt = 8 30404 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30405 v3.AuxInt = i0 30406 v3.Aux = s 30407 v3.AddArg(p) 30408 v3.AddArg(idx) 30409 v3.AddArg(mem) 30410 v2.AddArg(v3) 30411 v1.AddArg(v2) 30412 v0.AddArg(v1) 30413 v0.AddArg(y) 30414 return true 30415 } 30416 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30417 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30418 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> 
(SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30419 for { 30420 _ = v.Args[1] 30421 or := v.Args[0] 30422 if or.Op != OpAMD64ORQ { 30423 break 30424 } 30425 _ = or.Args[1] 30426 y := or.Args[0] 30427 s1 := or.Args[1] 30428 if s1.Op != OpAMD64SHLQconst { 30429 break 30430 } 30431 j1 := s1.AuxInt 30432 x1 := s1.Args[0] 30433 if x1.Op != OpAMD64MOVBloadidx1 { 30434 break 30435 } 30436 i1 := x1.AuxInt 30437 s := x1.Aux 30438 _ = x1.Args[2] 30439 p := x1.Args[0] 30440 idx := x1.Args[1] 30441 mem := x1.Args[2] 30442 s0 := v.Args[1] 30443 if s0.Op != OpAMD64SHLQconst { 30444 break 30445 } 30446 j0 := s0.AuxInt 30447 x0 := s0.Args[0] 30448 if x0.Op != OpAMD64MOVBloadidx1 { 30449 break 30450 } 30451 i0 := x0.AuxInt 30452 if x0.Aux != s { 30453 break 30454 } 30455 _ = x0.Args[2] 30456 if p != x0.Args[0] { 30457 break 30458 } 30459 if idx != x0.Args[1] { 30460 break 30461 } 30462 if mem != x0.Args[2] { 30463 break 30464 } 30465 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30466 break 30467 } 30468 b = mergePoint(b, x0, x1) 30469 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30470 v.reset(OpCopy) 30471 v.AddArg(v0) 30472 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30473 v1.AuxInt = j1 30474 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30475 v2.AuxInt = 8 30476 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30477 v3.AuxInt = i0 30478 v3.Aux = s 30479 v3.AddArg(p) 30480 v3.AddArg(idx) 30481 v3.AddArg(mem) 30482 v2.AddArg(v3) 30483 v1.AddArg(v2) 30484 v0.AddArg(v1) 30485 v0.AddArg(y) 30486 return true 30487 } 30488 return false 30489 } 30490 func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool { 30491 b := v.Block 30492 _ = b 30493 typ := &b.Func.Config.Types 30494 _ = typ 30495 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30496 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30497 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30498 for { 30499 _ = v.Args[1] 30500 or := v.Args[0] 30501 if or.Op != OpAMD64ORQ { 30502 break 30503 } 30504 _ = or.Args[1] 30505 y := or.Args[0] 30506 s1 := or.Args[1] 30507 if s1.Op != OpAMD64SHLQconst { 30508 break 30509 } 30510 j1 := s1.AuxInt 30511 x1 := s1.Args[0] 30512 if x1.Op != OpAMD64MOVBloadidx1 { 30513 break 30514 } 30515 i1 := x1.AuxInt 30516 s := x1.Aux 30517 _ = x1.Args[2] 30518 idx := x1.Args[0] 30519 p := x1.Args[1] 30520 mem := x1.Args[2] 30521 s0 := v.Args[1] 30522 if s0.Op != OpAMD64SHLQconst { 30523 break 30524 } 30525 j0 := s0.AuxInt 30526 x0 := s0.Args[0] 30527 if x0.Op != OpAMD64MOVBloadidx1 { 30528 break 30529 } 30530 i0 := x0.AuxInt 30531 if x0.Aux != s { 30532 break 30533 } 30534 _ = x0.Args[2] 30535 if p != x0.Args[0] { 30536 break 30537 } 30538 if idx != x0.Args[1] { 30539 break 30540 } 30541 if mem != x0.Args[2] { 30542 break 30543 } 30544 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(s0) && clobber(s1) && clobber(or)) { 30545 break 30546 } 30547 b = mergePoint(b, x0, x1) 30548 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30549 v.reset(OpCopy) 30550 v.AddArg(v0) 30551 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30552 v1.AuxInt = j1 30553 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30554 v2.AuxInt = 8 30555 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30556 v3.AuxInt = i0 30557 v3.Aux = s 30558 v3.AddArg(p) 30559 v3.AddArg(idx) 30560 v3.AddArg(mem) 30561 v2.AddArg(v3) 30562 v1.AddArg(v2) 30563 v0.AddArg(v1) 30564 v0.AddArg(y) 30565 return true 30566 } 30567 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30568 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30569 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30570 for { 30571 _ = v.Args[1] 30572 or := v.Args[0] 30573 if or.Op != OpAMD64ORQ { 30574 break 30575 } 30576 _ = or.Args[1] 30577 s1 := or.Args[0] 30578 if s1.Op != OpAMD64SHLQconst { 30579 break 30580 } 30581 j1 := s1.AuxInt 30582 x1 := s1.Args[0] 30583 if x1.Op != OpAMD64MOVBloadidx1 { 30584 break 30585 } 30586 i1 := x1.AuxInt 30587 s := x1.Aux 30588 _ = x1.Args[2] 30589 p := x1.Args[0] 30590 idx := x1.Args[1] 30591 mem := x1.Args[2] 30592 y := or.Args[1] 30593 s0 := v.Args[1] 30594 if s0.Op != OpAMD64SHLQconst { 30595 break 30596 } 30597 j0 := s0.AuxInt 30598 x0 := s0.Args[0] 30599 if x0.Op != OpAMD64MOVBloadidx1 { 30600 break 30601 } 30602 i0 := x0.AuxInt 30603 if x0.Aux != s { 30604 break 30605 } 30606 _ = x0.Args[2] 30607 if idx != x0.Args[0] { 30608 break 30609 } 30610 if p != x0.Args[1] { 30611 break 30612 } 30613 if mem != x0.Args[2] { 30614 break 30615 } 30616 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30617 break 30618 } 30619 b = mergePoint(b, x0, x1) 30620 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30621 v.reset(OpCopy) 30622 v.AddArg(v0) 30623 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30624 v1.AuxInt = j1 30625 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30626 v2.AuxInt = 8 30627 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30628 v3.AuxInt = i0 30629 v3.Aux = s 30630 v3.AddArg(p) 30631 v3.AddArg(idx) 30632 v3.AddArg(mem) 30633 v2.AddArg(v3) 30634 v1.AddArg(v2) 30635 v0.AddArg(v1) 30636 v0.AddArg(y) 30637 return true 30638 } 30639 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30640 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30641 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30642 for { 30643 _ = v.Args[1] 30644 or := v.Args[0] 30645 if or.Op != OpAMD64ORQ { 30646 break 30647 } 30648 _ = or.Args[1] 30649 s1 := or.Args[0] 30650 if s1.Op != OpAMD64SHLQconst 
{ 30651 break 30652 } 30653 j1 := s1.AuxInt 30654 x1 := s1.Args[0] 30655 if x1.Op != OpAMD64MOVBloadidx1 { 30656 break 30657 } 30658 i1 := x1.AuxInt 30659 s := x1.Aux 30660 _ = x1.Args[2] 30661 idx := x1.Args[0] 30662 p := x1.Args[1] 30663 mem := x1.Args[2] 30664 y := or.Args[1] 30665 s0 := v.Args[1] 30666 if s0.Op != OpAMD64SHLQconst { 30667 break 30668 } 30669 j0 := s0.AuxInt 30670 x0 := s0.Args[0] 30671 if x0.Op != OpAMD64MOVBloadidx1 { 30672 break 30673 } 30674 i0 := x0.AuxInt 30675 if x0.Aux != s { 30676 break 30677 } 30678 _ = x0.Args[2] 30679 if idx != x0.Args[0] { 30680 break 30681 } 30682 if p != x0.Args[1] { 30683 break 30684 } 30685 if mem != x0.Args[2] { 30686 break 30687 } 30688 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30689 break 30690 } 30691 b = mergePoint(b, x0, x1) 30692 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30693 v.reset(OpCopy) 30694 v.AddArg(v0) 30695 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30696 v1.AuxInt = j1 30697 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30698 v2.AuxInt = 8 30699 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30700 v3.AuxInt = i0 30701 v3.Aux = s 30702 v3.AddArg(p) 30703 v3.AddArg(idx) 30704 v3.AddArg(mem) 30705 v2.AddArg(v3) 30706 v1.AddArg(v2) 30707 v0.AddArg(v1) 30708 v0.AddArg(y) 30709 return true 30710 } 30711 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30712 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30713 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30714 for { 30715 _ = v.Args[1] 30716 or := v.Args[0] 30717 if or.Op != OpAMD64ORQ { 30718 break 30719 } 30720 _ = or.Args[1] 30721 y := or.Args[0] 30722 s1 := or.Args[1] 30723 if s1.Op != OpAMD64SHLQconst { 30724 break 30725 } 30726 j1 := s1.AuxInt 30727 x1 := s1.Args[0] 30728 if x1.Op != OpAMD64MOVBloadidx1 { 30729 break 30730 } 30731 i1 := x1.AuxInt 30732 s := x1.Aux 30733 _ = x1.Args[2] 30734 p := x1.Args[0] 30735 idx := x1.Args[1] 30736 mem := x1.Args[2] 30737 s0 := v.Args[1] 30738 if s0.Op != OpAMD64SHLQconst { 30739 break 30740 } 30741 j0 := s0.AuxInt 30742 x0 := s0.Args[0] 30743 if x0.Op != OpAMD64MOVBloadidx1 { 30744 break 30745 } 30746 i0 := x0.AuxInt 30747 if x0.Aux != s { 30748 break 30749 } 30750 _ = x0.Args[2] 30751 if idx != x0.Args[0] { 30752 break 30753 } 30754 if p != x0.Args[1] { 30755 break 30756 } 30757 if mem != x0.Args[2] { 30758 break 30759 } 30760 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30761 break 30762 } 30763 b = mergePoint(b, x0, x1) 30764 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30765 v.reset(OpCopy) 30766 v.AddArg(v0) 30767 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30768 v1.AuxInt = j1 30769 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30770 v2.AuxInt = 8 30771 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30772 v3.AuxInt = i0 30773 
v3.Aux = s 30774 v3.AddArg(p) 30775 v3.AddArg(idx) 30776 v3.AddArg(mem) 30777 v2.AddArg(v3) 30778 v1.AddArg(v2) 30779 v0.AddArg(v1) 30780 v0.AddArg(y) 30781 return true 30782 } 30783 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30784 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 30785 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 30786 for { 30787 _ = v.Args[1] 30788 or := v.Args[0] 30789 if or.Op != OpAMD64ORQ { 30790 break 30791 } 30792 _ = or.Args[1] 30793 y := or.Args[0] 30794 s1 := or.Args[1] 30795 if s1.Op != OpAMD64SHLQconst { 30796 break 30797 } 30798 j1 := s1.AuxInt 30799 x1 := s1.Args[0] 30800 if x1.Op != OpAMD64MOVBloadidx1 { 30801 break 30802 } 30803 i1 := x1.AuxInt 30804 s := x1.Aux 30805 _ = x1.Args[2] 30806 idx := x1.Args[0] 30807 p := x1.Args[1] 30808 mem := x1.Args[2] 30809 s0 := v.Args[1] 30810 if s0.Op != OpAMD64SHLQconst { 30811 break 30812 } 30813 j0 := s0.AuxInt 30814 x0 := s0.Args[0] 30815 if x0.Op != OpAMD64MOVBloadidx1 { 30816 break 30817 } 30818 i0 := x0.AuxInt 30819 if x0.Aux != s { 30820 break 30821 } 30822 _ = x0.Args[2] 30823 if idx != x0.Args[0] { 30824 break 30825 } 30826 if p != x0.Args[1] { 30827 break 30828 } 30829 if mem != x0.Args[2] { 30830 break 30831 } 30832 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 30833 break 30834 } 30835 b = mergePoint(b, x0, x1) 30836 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30837 v.reset(OpCopy) 30838 v.AddArg(v0) 30839 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30840 v1.AuxInt = j1 30841 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 30842 v2.AuxInt = 8 30843 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30844 v3.AuxInt = i0 30845 v3.Aux = s 30846 v3.AddArg(p) 30847 v3.AddArg(idx) 30848 v3.AddArg(mem) 30849 v2.AddArg(v3) 30850 v1.AddArg(v2) 30851 v0.AddArg(v1) 30852 v0.AddArg(y) 30853 return true 30854 } 30855 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 30856 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 30857 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 30858 for { 30859 _ = v.Args[1] 30860 s0 := v.Args[0] 30861 if s0.Op != OpAMD64SHLQconst { 30862 break 30863 } 30864 j0 := s0.AuxInt 30865 r0 := s0.Args[0] 30866 if r0.Op != OpAMD64ROLWconst { 30867 break 30868 } 30869 if r0.AuxInt != 8 { 30870 break 30871 } 30872 x0 := r0.Args[0] 30873 if x0.Op != OpAMD64MOVWloadidx1 { 30874 break 30875 } 30876 i0 := x0.AuxInt 30877 s := x0.Aux 30878 _ = x0.Args[2] 30879 p := x0.Args[0] 30880 idx := x0.Args[1] 30881 mem := x0.Args[2] 30882 or := v.Args[1] 30883 if or.Op != OpAMD64ORQ { 
30884 break 30885 } 30886 _ = or.Args[1] 30887 s1 := or.Args[0] 30888 if s1.Op != OpAMD64SHLQconst { 30889 break 30890 } 30891 j1 := s1.AuxInt 30892 r1 := s1.Args[0] 30893 if r1.Op != OpAMD64ROLWconst { 30894 break 30895 } 30896 if r1.AuxInt != 8 { 30897 break 30898 } 30899 x1 := r1.Args[0] 30900 if x1.Op != OpAMD64MOVWloadidx1 { 30901 break 30902 } 30903 i1 := x1.AuxInt 30904 if x1.Aux != s { 30905 break 30906 } 30907 _ = x1.Args[2] 30908 if p != x1.Args[0] { 30909 break 30910 } 30911 if idx != x1.Args[1] { 30912 break 30913 } 30914 if mem != x1.Args[2] { 30915 break 30916 } 30917 y := or.Args[1] 30918 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 30919 break 30920 } 30921 b = mergePoint(b, x0, x1) 30922 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30923 v.reset(OpCopy) 30924 v.AddArg(v0) 30925 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30926 v1.AuxInt = j1 30927 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 30928 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30929 v3.AuxInt = i0 30930 v3.Aux = s 30931 v3.AddArg(p) 30932 v3.AddArg(idx) 30933 v3.AddArg(mem) 30934 v2.AddArg(v3) 30935 v1.AddArg(v2) 30936 v0.AddArg(v1) 30937 v0.AddArg(y) 30938 return true 30939 } 30940 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 30941 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 30942 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 30943 for { 30944 _ = v.Args[1] 30945 s0 := v.Args[0] 30946 if s0.Op != OpAMD64SHLQconst { 30947 break 30948 } 30949 j0 := s0.AuxInt 30950 r0 := s0.Args[0] 30951 if r0.Op != OpAMD64ROLWconst { 30952 break 30953 } 30954 if r0.AuxInt != 8 { 30955 break 30956 } 30957 x0 := r0.Args[0] 30958 if x0.Op != OpAMD64MOVWloadidx1 { 30959 break 30960 } 30961 i0 := x0.AuxInt 30962 s := x0.Aux 30963 _ = x0.Args[2] 30964 idx := x0.Args[0] 30965 p := x0.Args[1] 30966 mem := x0.Args[2] 30967 or := v.Args[1] 30968 if or.Op != OpAMD64ORQ { 30969 break 30970 } 30971 _ = or.Args[1] 30972 s1 := or.Args[0] 30973 if s1.Op != OpAMD64SHLQconst { 30974 break 30975 } 30976 j1 := s1.AuxInt 30977 r1 := s1.Args[0] 30978 if r1.Op != OpAMD64ROLWconst { 30979 break 30980 } 30981 if r1.AuxInt != 8 { 30982 break 30983 } 30984 x1 := r1.Args[0] 30985 if x1.Op != OpAMD64MOVWloadidx1 { 30986 break 30987 } 30988 i1 := x1.AuxInt 30989 if x1.Aux != s { 30990 break 30991 } 30992 _ = x1.Args[2] 30993 if p != x1.Args[0] { 30994 break 30995 } 30996 if idx != x1.Args[1] { 30997 break 30998 } 30999 if mem != x1.Args[2] { 31000 break 31001 } 31002 y := or.Args[1] 31003 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31004 break 31005 } 31006 b = 
mergePoint(b, x0, x1) 31007 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31008 v.reset(OpCopy) 31009 v.AddArg(v0) 31010 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31011 v1.AuxInt = j1 31012 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31013 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31014 v3.AuxInt = i0 31015 v3.Aux = s 31016 v3.AddArg(p) 31017 v3.AddArg(idx) 31018 v3.AddArg(mem) 31019 v2.AddArg(v3) 31020 v1.AddArg(v2) 31021 v0.AddArg(v1) 31022 v0.AddArg(y) 31023 return true 31024 } 31025 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 31026 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31027 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31028 for { 31029 _ = v.Args[1] 31030 s0 := v.Args[0] 31031 if s0.Op != OpAMD64SHLQconst { 31032 break 31033 } 31034 j0 := s0.AuxInt 31035 r0 := s0.Args[0] 31036 if r0.Op != OpAMD64ROLWconst { 31037 break 31038 } 31039 if r0.AuxInt != 8 { 31040 break 31041 } 31042 x0 := r0.Args[0] 31043 if x0.Op != OpAMD64MOVWloadidx1 { 31044 break 31045 } 31046 i0 := x0.AuxInt 31047 s := x0.Aux 31048 _ = x0.Args[2] 31049 p := x0.Args[0] 31050 idx := x0.Args[1] 31051 mem := x0.Args[2] 31052 or := v.Args[1] 31053 if or.Op != OpAMD64ORQ { 31054 break 31055 } 31056 _ = or.Args[1] 31057 s1 := or.Args[0] 31058 if s1.Op != OpAMD64SHLQconst { 31059 break 31060 } 31061 j1 := s1.AuxInt 31062 r1 := s1.Args[0] 31063 if r1.Op != OpAMD64ROLWconst { 31064 break 31065 } 31066 if r1.AuxInt != 8 { 31067 break 31068 } 31069 x1 := r1.Args[0] 31070 if x1.Op != OpAMD64MOVWloadidx1 { 31071 break 31072 } 31073 i1 := x1.AuxInt 31074 if x1.Aux != s { 31075 break 31076 } 31077 _ = x1.Args[2] 31078 if idx != x1.Args[0] { 31079 break 31080 } 31081 if p != x1.Args[1] { 31082 break 31083 } 31084 if mem != x1.Args[2] { 31085 break 31086 } 31087 y := or.Args[1] 31088 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31089 break 31090 } 31091 b = mergePoint(b, x0, x1) 31092 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31093 v.reset(OpCopy) 31094 v.AddArg(v0) 31095 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31096 v1.AuxInt = j1 31097 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31098 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31099 v3.AuxInt = i0 31100 v3.Aux = s 31101 v3.AddArg(p) 31102 v3.AddArg(idx) 31103 v3.AddArg(mem) 31104 v2.AddArg(v3) 31105 v1.AddArg(v2) 31106 v0.AddArg(v1) 31107 v0.AddArg(y) 31108 return true 31109 } 31110 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 31111 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) 
&& clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31112 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31113 for { 31114 _ = v.Args[1] 31115 s0 := v.Args[0] 31116 if s0.Op != OpAMD64SHLQconst { 31117 break 31118 } 31119 j0 := s0.AuxInt 31120 r0 := s0.Args[0] 31121 if r0.Op != OpAMD64ROLWconst { 31122 break 31123 } 31124 if r0.AuxInt != 8 { 31125 break 31126 } 31127 x0 := r0.Args[0] 31128 if x0.Op != OpAMD64MOVWloadidx1 { 31129 break 31130 } 31131 i0 := x0.AuxInt 31132 s := x0.Aux 31133 _ = x0.Args[2] 31134 idx := x0.Args[0] 31135 p := x0.Args[1] 31136 mem := x0.Args[2] 31137 or := v.Args[1] 31138 if or.Op != OpAMD64ORQ { 31139 break 31140 } 31141 _ = or.Args[1] 31142 s1 := or.Args[0] 31143 if s1.Op != OpAMD64SHLQconst { 31144 break 31145 } 31146 j1 := s1.AuxInt 31147 r1 := s1.Args[0] 31148 if r1.Op != OpAMD64ROLWconst { 31149 break 31150 } 31151 if r1.AuxInt != 8 { 31152 break 31153 } 31154 x1 := r1.Args[0] 31155 if x1.Op != OpAMD64MOVWloadidx1 { 31156 break 31157 } 31158 i1 := x1.AuxInt 31159 if x1.Aux != s { 31160 break 31161 } 31162 _ = x1.Args[2] 31163 if idx != x1.Args[0] { 31164 break 31165 } 31166 if p != x1.Args[1] { 31167 break 31168 } 31169 if mem != x1.Args[2] { 31170 break 31171 } 31172 y := or.Args[1] 31173 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31174 break 31175 } 31176 b = mergePoint(b, x0, x1) 31177 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31178 v.reset(OpCopy) 31179 v.AddArg(v0) 31180 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31181 v1.AuxInt = j1 31182 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31183 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31184 v3.AuxInt = i0 31185 v3.Aux = s 31186 v3.AddArg(p) 31187 v3.AddArg(idx) 31188 v3.AddArg(mem) 31189 v2.AddArg(v3) 31190 v1.AddArg(v2) 31191 v0.AddArg(v1) 31192 v0.AddArg(y) 31193 return true 31194 } 31195 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 31196 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31197 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31198 for { 31199 _ = v.Args[1] 31200 s0 := v.Args[0] 31201 if s0.Op != OpAMD64SHLQconst { 31202 break 31203 } 31204 j0 := s0.AuxInt 31205 r0 := s0.Args[0] 31206 if r0.Op != OpAMD64ROLWconst { 31207 break 31208 } 31209 if r0.AuxInt != 8 { 31210 break 31211 } 31212 x0 := r0.Args[0] 31213 if x0.Op != OpAMD64MOVWloadidx1 { 31214 break 31215 } 31216 i0 := x0.AuxInt 31217 s := x0.Aux 31218 _ = x0.Args[2] 31219 p := x0.Args[0] 31220 idx := x0.Args[1] 31221 mem := x0.Args[2] 31222 or := v.Args[1] 31223 if or.Op != OpAMD64ORQ { 31224 break 31225 } 31226 _ = or.Args[1] 31227 y := or.Args[0] 31228 s1 := or.Args[1] 31229 if s1.Op != OpAMD64SHLQconst { 31230 break 31231 } 31232 j1 := s1.AuxInt 31233 r1 := s1.Args[0] 31234 if r1.Op != OpAMD64ROLWconst 
{ 31235 break 31236 } 31237 if r1.AuxInt != 8 { 31238 break 31239 } 31240 x1 := r1.Args[0] 31241 if x1.Op != OpAMD64MOVWloadidx1 { 31242 break 31243 } 31244 i1 := x1.AuxInt 31245 if x1.Aux != s { 31246 break 31247 } 31248 _ = x1.Args[2] 31249 if p != x1.Args[0] { 31250 break 31251 } 31252 if idx != x1.Args[1] { 31253 break 31254 } 31255 if mem != x1.Args[2] { 31256 break 31257 } 31258 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31259 break 31260 } 31261 b = mergePoint(b, x0, x1) 31262 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31263 v.reset(OpCopy) 31264 v.AddArg(v0) 31265 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31266 v1.AuxInt = j1 31267 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31268 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31269 v3.AuxInt = i0 31270 v3.Aux = s 31271 v3.AddArg(p) 31272 v3.AddArg(idx) 31273 v3.AddArg(mem) 31274 v2.AddArg(v3) 31275 v1.AddArg(v2) 31276 v0.AddArg(v1) 31277 v0.AddArg(y) 31278 return true 31279 } 31280 return false 31281 } 31282 func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { 31283 b := v.Block 31284 _ = b 31285 typ := &b.Func.Config.Types 31286 _ = typ 31287 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 31288 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31289 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31290 for { 31291 _ = v.Args[1] 31292 s0 := v.Args[0] 31293 if s0.Op != OpAMD64SHLQconst { 31294 break 31295 } 31296 j0 := s0.AuxInt 31297 r0 := s0.Args[0] 31298 if r0.Op != OpAMD64ROLWconst { 31299 break 31300 } 31301 if r0.AuxInt != 8 { 31302 break 31303 } 31304 x0 := r0.Args[0] 31305 if x0.Op != OpAMD64MOVWloadidx1 { 31306 break 31307 } 31308 i0 := x0.AuxInt 31309 s := x0.Aux 31310 _ = x0.Args[2] 31311 idx := x0.Args[0] 31312 p := x0.Args[1] 31313 mem := x0.Args[2] 31314 or := v.Args[1] 31315 if or.Op != OpAMD64ORQ { 31316 break 31317 } 31318 _ = or.Args[1] 31319 y := or.Args[0] 31320 s1 := or.Args[1] 31321 if s1.Op != OpAMD64SHLQconst { 31322 break 31323 } 31324 j1 := s1.AuxInt 31325 r1 := s1.Args[0] 31326 if r1.Op != OpAMD64ROLWconst { 31327 break 31328 } 31329 if r1.AuxInt != 8 { 31330 break 31331 } 31332 x1 := r1.Args[0] 31333 if x1.Op != OpAMD64MOVWloadidx1 { 31334 break 31335 } 31336 i1 := x1.AuxInt 31337 if x1.Aux != s { 31338 break 31339 } 31340 _ = x1.Args[2] 31341 if p != x1.Args[0] { 31342 break 31343 } 31344 if idx != x1.Args[1] { 31345 break 31346 } 31347 if mem != x1.Args[2] { 31348 break 31349 } 31350 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31351 break 31352 } 31353 b = mergePoint(b, x0, x1) 31354 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, 
v.Type) 31355 v.reset(OpCopy) 31356 v.AddArg(v0) 31357 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31358 v1.AuxInt = j1 31359 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31360 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31361 v3.AuxInt = i0 31362 v3.Aux = s 31363 v3.AddArg(p) 31364 v3.AddArg(idx) 31365 v3.AddArg(mem) 31366 v2.AddArg(v3) 31367 v1.AddArg(v2) 31368 v0.AddArg(v1) 31369 v0.AddArg(y) 31370 return true 31371 } 31372 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 31373 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31374 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31375 for { 31376 _ = v.Args[1] 31377 s0 := v.Args[0] 31378 if s0.Op != OpAMD64SHLQconst { 31379 break 31380 } 31381 j0 := s0.AuxInt 31382 r0 := s0.Args[0] 31383 if r0.Op != OpAMD64ROLWconst { 31384 break 31385 } 31386 if r0.AuxInt != 8 { 31387 break 31388 } 31389 x0 := r0.Args[0] 31390 if x0.Op != OpAMD64MOVWloadidx1 { 31391 break 31392 } 31393 i0 := x0.AuxInt 31394 s := x0.Aux 31395 _ = x0.Args[2] 31396 p := x0.Args[0] 31397 idx := x0.Args[1] 31398 mem := x0.Args[2] 31399 or := v.Args[1] 31400 if or.Op != OpAMD64ORQ { 31401 break 31402 } 31403 _ = or.Args[1] 31404 y := or.Args[0] 31405 s1 := or.Args[1] 31406 if s1.Op != OpAMD64SHLQconst { 31407 break 31408 } 31409 j1 := s1.AuxInt 31410 r1 := s1.Args[0] 31411 if r1.Op != OpAMD64ROLWconst { 31412 break 31413 } 31414 if r1.AuxInt != 8 { 31415 break 31416 } 31417 x1 := r1.Args[0] 31418 if x1.Op != OpAMD64MOVWloadidx1 { 31419 break 31420 } 31421 i1 := x1.AuxInt 31422 if x1.Aux != s { 31423 break 31424 } 31425 _ = x1.Args[2] 31426 if idx != x1.Args[0] { 31427 break 31428 } 31429 if p != x1.Args[1] { 31430 break 31431 } 31432 if mem != x1.Args[2] { 31433 break 31434 } 31435 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31436 break 31437 } 31438 b = mergePoint(b, x0, x1) 31439 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31440 v.reset(OpCopy) 31441 v.AddArg(v0) 31442 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31443 v1.AuxInt = j1 31444 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31445 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31446 v3.AuxInt = i0 31447 v3.Aux = s 31448 v3.AddArg(p) 31449 v3.AddArg(idx) 31450 v3.AddArg(mem) 31451 v2.AddArg(v3) 31452 v1.AddArg(v2) 31453 v0.AddArg(v1) 31454 v0.AddArg(y) 31455 return true 31456 } 31457 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 31458 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 
31459 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31460 for { 31461 _ = v.Args[1] 31462 s0 := v.Args[0] 31463 if s0.Op != OpAMD64SHLQconst { 31464 break 31465 } 31466 j0 := s0.AuxInt 31467 r0 := s0.Args[0] 31468 if r0.Op != OpAMD64ROLWconst { 31469 break 31470 } 31471 if r0.AuxInt != 8 { 31472 break 31473 } 31474 x0 := r0.Args[0] 31475 if x0.Op != OpAMD64MOVWloadidx1 { 31476 break 31477 } 31478 i0 := x0.AuxInt 31479 s := x0.Aux 31480 _ = x0.Args[2] 31481 idx := x0.Args[0] 31482 p := x0.Args[1] 31483 mem := x0.Args[2] 31484 or := v.Args[1] 31485 if or.Op != OpAMD64ORQ { 31486 break 31487 } 31488 _ = or.Args[1] 31489 y := or.Args[0] 31490 s1 := or.Args[1] 31491 if s1.Op != OpAMD64SHLQconst { 31492 break 31493 } 31494 j1 := s1.AuxInt 31495 r1 := s1.Args[0] 31496 if r1.Op != OpAMD64ROLWconst { 31497 break 31498 } 31499 if r1.AuxInt != 8 { 31500 break 31501 } 31502 x1 := r1.Args[0] 31503 if x1.Op != OpAMD64MOVWloadidx1 { 31504 break 31505 } 31506 i1 := x1.AuxInt 31507 if x1.Aux != s { 31508 break 31509 } 31510 _ = x1.Args[2] 31511 if idx != x1.Args[0] { 31512 break 31513 } 31514 if p != x1.Args[1] { 31515 break 31516 } 31517 if mem != x1.Args[2] { 31518 break 31519 } 31520 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31521 break 31522 } 31523 b = mergePoint(b, x0, x1) 31524 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31525 v.reset(OpCopy) 31526 v.AddArg(v0) 31527 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31528 v1.AuxInt = j1 31529 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31530 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31531 v3.AuxInt = i0 31532 v3.Aux = s 31533 v3.AddArg(p) 31534 v3.AddArg(idx) 31535 v3.AddArg(mem) 31536 v2.AddArg(v3) 31537 v1.AddArg(v2) 31538 v0.AddArg(v1) 31539 v0.AddArg(y) 31540 return true 31541 } 31542 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31543 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31544 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31545 for { 31546 _ = v.Args[1] 31547 or := v.Args[0] 31548 if or.Op != OpAMD64ORQ { 31549 break 31550 } 31551 _ = or.Args[1] 31552 s1 := or.Args[0] 31553 if s1.Op != OpAMD64SHLQconst { 31554 break 31555 } 31556 j1 := s1.AuxInt 31557 r1 := s1.Args[0] 31558 if r1.Op != OpAMD64ROLWconst { 31559 break 31560 } 31561 if r1.AuxInt != 8 { 31562 break 31563 } 31564 x1 := r1.Args[0] 31565 if x1.Op != OpAMD64MOVWloadidx1 { 31566 break 31567 } 31568 i1 := x1.AuxInt 31569 s := x1.Aux 31570 _ = x1.Args[2] 31571 p := x1.Args[0] 31572 idx := x1.Args[1] 31573 mem := x1.Args[2] 31574 y := or.Args[1] 31575 s0 := v.Args[1] 31576 if s0.Op != OpAMD64SHLQconst { 31577 break 31578 } 31579 j0 := s0.AuxInt 31580 r0 := s0.Args[0] 31581 if r0.Op != OpAMD64ROLWconst { 31582 break 31583 } 31584 if r0.AuxInt != 8 { 31585 break 
31586 } 31587 x0 := r0.Args[0] 31588 if x0.Op != OpAMD64MOVWloadidx1 { 31589 break 31590 } 31591 i0 := x0.AuxInt 31592 if x0.Aux != s { 31593 break 31594 } 31595 _ = x0.Args[2] 31596 if p != x0.Args[0] { 31597 break 31598 } 31599 if idx != x0.Args[1] { 31600 break 31601 } 31602 if mem != x0.Args[2] { 31603 break 31604 } 31605 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31606 break 31607 } 31608 b = mergePoint(b, x0, x1) 31609 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31610 v.reset(OpCopy) 31611 v.AddArg(v0) 31612 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31613 v1.AuxInt = j1 31614 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31615 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31616 v3.AuxInt = i0 31617 v3.Aux = s 31618 v3.AddArg(p) 31619 v3.AddArg(idx) 31620 v3.AddArg(mem) 31621 v2.AddArg(v3) 31622 v1.AddArg(v2) 31623 v0.AddArg(v1) 31624 v0.AddArg(y) 31625 return true 31626 } 31627 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31628 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31629 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31630 for { 31631 _ = v.Args[1] 31632 or := v.Args[0] 31633 if or.Op != OpAMD64ORQ { 31634 break 31635 } 31636 _ = or.Args[1] 31637 s1 := or.Args[0] 31638 if s1.Op != OpAMD64SHLQconst { 31639 break 31640 } 31641 j1 := s1.AuxInt 31642 r1 := s1.Args[0] 31643 if r1.Op != OpAMD64ROLWconst { 31644 break 31645 } 31646 if r1.AuxInt != 8 { 31647 break 31648 } 31649 x1 := r1.Args[0] 31650 if x1.Op != OpAMD64MOVWloadidx1 { 31651 break 31652 } 31653 i1 := x1.AuxInt 31654 s := x1.Aux 31655 _ = x1.Args[2] 31656 idx := x1.Args[0] 31657 p := x1.Args[1] 31658 mem := x1.Args[2] 31659 y := or.Args[1] 31660 s0 := v.Args[1] 31661 if s0.Op != OpAMD64SHLQconst { 31662 break 31663 } 31664 j0 := s0.AuxInt 31665 r0 := s0.Args[0] 31666 if r0.Op != OpAMD64ROLWconst { 31667 break 31668 } 31669 if r0.AuxInt != 8 { 31670 break 31671 } 31672 x0 := r0.Args[0] 31673 if x0.Op != OpAMD64MOVWloadidx1 { 31674 break 31675 } 31676 i0 := x0.AuxInt 31677 if x0.Aux != s { 31678 break 31679 } 31680 _ = x0.Args[2] 31681 if p != x0.Args[0] { 31682 break 31683 } 31684 if idx != x0.Args[1] { 31685 break 31686 } 31687 if mem != x0.Args[2] { 31688 break 31689 } 31690 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31691 break 31692 } 31693 b = mergePoint(b, x0, x1) 31694 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31695 v.reset(OpCopy) 31696 v.AddArg(v0) 31697 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31698 v1.AuxInt = j1 31699 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31700 v3 := b.NewValue0(v.Pos, 
OpAMD64MOVLloadidx1, typ.UInt32) 31701 v3.AuxInt = i0 31702 v3.Aux = s 31703 v3.AddArg(p) 31704 v3.AddArg(idx) 31705 v3.AddArg(mem) 31706 v2.AddArg(v3) 31707 v1.AddArg(v2) 31708 v0.AddArg(v1) 31709 v0.AddArg(y) 31710 return true 31711 } 31712 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31713 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31714 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31715 for { 31716 _ = v.Args[1] 31717 or := v.Args[0] 31718 if or.Op != OpAMD64ORQ { 31719 break 31720 } 31721 _ = or.Args[1] 31722 y := or.Args[0] 31723 s1 := or.Args[1] 31724 if s1.Op != OpAMD64SHLQconst { 31725 break 31726 } 31727 j1 := s1.AuxInt 31728 r1 := s1.Args[0] 31729 if r1.Op != OpAMD64ROLWconst { 31730 break 31731 } 31732 if r1.AuxInt != 8 { 31733 break 31734 } 31735 x1 := r1.Args[0] 31736 if x1.Op != OpAMD64MOVWloadidx1 { 31737 break 31738 } 31739 i1 := x1.AuxInt 31740 s := x1.Aux 31741 _ = x1.Args[2] 31742 p := x1.Args[0] 31743 idx := x1.Args[1] 31744 mem := x1.Args[2] 31745 s0 := v.Args[1] 31746 if s0.Op != OpAMD64SHLQconst { 31747 break 31748 } 31749 j0 := s0.AuxInt 31750 r0 := s0.Args[0] 31751 if r0.Op != OpAMD64ROLWconst { 31752 break 31753 } 31754 if r0.AuxInt != 8 { 31755 break 31756 } 31757 x0 := r0.Args[0] 31758 if x0.Op != OpAMD64MOVWloadidx1 { 31759 break 31760 } 31761 i0 := x0.AuxInt 31762 if x0.Aux != s { 31763 break 31764 } 31765 _ = x0.Args[2] 31766 if p != x0.Args[0] { 31767 break 31768 } 31769 if idx != x0.Args[1] { 31770 break 31771 } 31772 if mem != x0.Args[2] { 31773 break 31774 } 31775 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31776 break 31777 } 31778 b = mergePoint(b, x0, x1) 31779 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31780 v.reset(OpCopy) 31781 v.AddArg(v0) 31782 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31783 v1.AuxInt = j1 31784 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31785 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31786 v3.AuxInt = i0 31787 v3.Aux = s 31788 v3.AddArg(p) 31789 v3.AddArg(idx) 31790 v3.AddArg(mem) 31791 v2.AddArg(v3) 31792 v1.AddArg(v2) 31793 v0.AddArg(v1) 31794 v0.AddArg(y) 31795 return true 31796 } 31797 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 31798 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31799 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31800 for { 31801 _ = v.Args[1] 31802 or := v.Args[0] 31803 if or.Op != OpAMD64ORQ 
{ 31804 break 31805 } 31806 _ = or.Args[1] 31807 y := or.Args[0] 31808 s1 := or.Args[1] 31809 if s1.Op != OpAMD64SHLQconst { 31810 break 31811 } 31812 j1 := s1.AuxInt 31813 r1 := s1.Args[0] 31814 if r1.Op != OpAMD64ROLWconst { 31815 break 31816 } 31817 if r1.AuxInt != 8 { 31818 break 31819 } 31820 x1 := r1.Args[0] 31821 if x1.Op != OpAMD64MOVWloadidx1 { 31822 break 31823 } 31824 i1 := x1.AuxInt 31825 s := x1.Aux 31826 _ = x1.Args[2] 31827 idx := x1.Args[0] 31828 p := x1.Args[1] 31829 mem := x1.Args[2] 31830 s0 := v.Args[1] 31831 if s0.Op != OpAMD64SHLQconst { 31832 break 31833 } 31834 j0 := s0.AuxInt 31835 r0 := s0.Args[0] 31836 if r0.Op != OpAMD64ROLWconst { 31837 break 31838 } 31839 if r0.AuxInt != 8 { 31840 break 31841 } 31842 x0 := r0.Args[0] 31843 if x0.Op != OpAMD64MOVWloadidx1 { 31844 break 31845 } 31846 i0 := x0.AuxInt 31847 if x0.Aux != s { 31848 break 31849 } 31850 _ = x0.Args[2] 31851 if p != x0.Args[0] { 31852 break 31853 } 31854 if idx != x0.Args[1] { 31855 break 31856 } 31857 if mem != x0.Args[2] { 31858 break 31859 } 31860 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31861 break 31862 } 31863 b = mergePoint(b, x0, x1) 31864 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31865 v.reset(OpCopy) 31866 v.AddArg(v0) 31867 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31868 v1.AuxInt = j1 31869 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31870 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31871 v3.AuxInt = i0 31872 v3.Aux = s 31873 v3.AddArg(p) 31874 v3.AddArg(idx) 31875 v3.AddArg(mem) 31876 v2.AddArg(v3) 31877 v1.AddArg(v2) 31878 v0.AddArg(v1) 31879 v0.AddArg(y) 31880 return true 31881 } 31882 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 31883 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31884 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31885 for { 31886 _ = v.Args[1] 31887 or := v.Args[0] 31888 if or.Op != OpAMD64ORQ { 31889 break 31890 } 31891 _ = or.Args[1] 31892 s1 := or.Args[0] 31893 if s1.Op != OpAMD64SHLQconst { 31894 break 31895 } 31896 j1 := s1.AuxInt 31897 r1 := s1.Args[0] 31898 if r1.Op != OpAMD64ROLWconst { 31899 break 31900 } 31901 if r1.AuxInt != 8 { 31902 break 31903 } 31904 x1 := r1.Args[0] 31905 if x1.Op != OpAMD64MOVWloadidx1 { 31906 break 31907 } 31908 i1 := x1.AuxInt 31909 s := x1.Aux 31910 _ = x1.Args[2] 31911 p := x1.Args[0] 31912 idx := x1.Args[1] 31913 mem := x1.Args[2] 31914 y := or.Args[1] 31915 s0 := v.Args[1] 31916 if s0.Op != OpAMD64SHLQconst { 31917 break 31918 } 31919 j0 := s0.AuxInt 31920 r0 := s0.Args[0] 31921 if r0.Op != OpAMD64ROLWconst { 31922 break 31923 } 31924 if r0.AuxInt != 8 { 31925 break 31926 } 31927 x0 := r0.Args[0] 31928 if x0.Op != OpAMD64MOVWloadidx1 { 31929 break 31930 } 31931 i0 := x0.AuxInt 31932 if x0.Aux != s { 31933 break 31934 } 31935 _ = x0.Args[2] 31936 if idx != x0.Args[0] { 31937 break 
31938 } 31939 if p != x0.Args[1] { 31940 break 31941 } 31942 if mem != x0.Args[2] { 31943 break 31944 } 31945 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 31946 break 31947 } 31948 b = mergePoint(b, x0, x1) 31949 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31950 v.reset(OpCopy) 31951 v.AddArg(v0) 31952 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31953 v1.AuxInt = j1 31954 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 31955 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31956 v3.AuxInt = i0 31957 v3.Aux = s 31958 v3.AddArg(p) 31959 v3.AddArg(idx) 31960 v3.AddArg(mem) 31961 v2.AddArg(v3) 31962 v1.AddArg(v2) 31963 v0.AddArg(v1) 31964 v0.AddArg(y) 31965 return true 31966 } 31967 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 31968 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 31969 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 31970 for { 31971 _ = v.Args[1] 31972 or := v.Args[0] 31973 if or.Op != OpAMD64ORQ { 31974 break 31975 } 31976 _ = or.Args[1] 31977 s1 := or.Args[0] 31978 if s1.Op != OpAMD64SHLQconst { 31979 break 31980 } 31981 j1 := s1.AuxInt 31982 r1 := s1.Args[0] 31983 if r1.Op != OpAMD64ROLWconst { 31984 break 31985 } 31986 if r1.AuxInt != 8 { 31987 break 31988 } 31989 x1 := r1.Args[0] 31990 if x1.Op != OpAMD64MOVWloadidx1 { 31991 break 31992 } 31993 i1 := x1.AuxInt 31994 s := x1.Aux 31995 _ = x1.Args[2] 31996 idx := x1.Args[0] 31997 p := x1.Args[1] 31998 mem := x1.Args[2] 31999 y := or.Args[1] 32000 s0 := v.Args[1] 32001 if s0.Op != OpAMD64SHLQconst { 32002 break 32003 } 32004 j0 := s0.AuxInt 32005 r0 := s0.Args[0] 32006 if r0.Op != OpAMD64ROLWconst { 32007 break 32008 } 32009 if r0.AuxInt != 8 { 32010 break 32011 } 32012 x0 := r0.Args[0] 32013 if x0.Op != OpAMD64MOVWloadidx1 { 32014 break 32015 } 32016 i0 := x0.AuxInt 32017 if x0.Aux != s { 32018 break 32019 } 32020 _ = x0.Args[2] 32021 if idx != x0.Args[0] { 32022 break 32023 } 32024 if p != x0.Args[1] { 32025 break 32026 } 32027 if mem != x0.Args[2] { 32028 break 32029 } 32030 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32031 break 32032 } 32033 b = mergePoint(b, x0, x1) 32034 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32035 v.reset(OpCopy) 32036 v.AddArg(v0) 32037 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32038 v1.AuxInt = j1 32039 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32040 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32041 v3.AuxInt = i0 32042 v3.Aux = s 32043 v3.AddArg(p) 32044 v3.AddArg(idx) 32045 v3.AddArg(mem) 32046 v2.AddArg(v3) 32047 v1.AddArg(v2) 32048 v0.AddArg(v1) 32049 v0.AddArg(y) 32050 return true 
32051 } 32052 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 32053 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32054 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32055 for { 32056 _ = v.Args[1] 32057 or := v.Args[0] 32058 if or.Op != OpAMD64ORQ { 32059 break 32060 } 32061 _ = or.Args[1] 32062 y := or.Args[0] 32063 s1 := or.Args[1] 32064 if s1.Op != OpAMD64SHLQconst { 32065 break 32066 } 32067 j1 := s1.AuxInt 32068 r1 := s1.Args[0] 32069 if r1.Op != OpAMD64ROLWconst { 32070 break 32071 } 32072 if r1.AuxInt != 8 { 32073 break 32074 } 32075 x1 := r1.Args[0] 32076 if x1.Op != OpAMD64MOVWloadidx1 { 32077 break 32078 } 32079 i1 := x1.AuxInt 32080 s := x1.Aux 32081 _ = x1.Args[2] 32082 p := x1.Args[0] 32083 idx := x1.Args[1] 32084 mem := x1.Args[2] 32085 s0 := v.Args[1] 32086 if s0.Op != OpAMD64SHLQconst { 32087 break 32088 } 32089 j0 := s0.AuxInt 32090 r0 := s0.Args[0] 32091 if r0.Op != OpAMD64ROLWconst { 32092 break 32093 } 32094 if r0.AuxInt != 8 { 32095 break 32096 } 32097 x0 := r0.Args[0] 32098 if x0.Op != OpAMD64MOVWloadidx1 { 32099 break 32100 } 32101 i0 := x0.AuxInt 32102 if x0.Aux != s { 32103 break 32104 } 32105 _ = x0.Args[2] 32106 if idx != x0.Args[0] { 32107 break 32108 } 32109 if p != x0.Args[1] { 32110 break 32111 } 32112 if mem != x0.Args[2] { 32113 break 32114 } 32115 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32116 break 32117 } 32118 b = mergePoint(b, x0, x1) 32119 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32120 v.reset(OpCopy) 32121 v.AddArg(v0) 32122 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32123 v1.AuxInt = j1 32124 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32125 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32126 v3.AuxInt = i0 32127 v3.Aux = s 32128 v3.AddArg(p) 32129 v3.AddArg(idx) 32130 v3.AddArg(mem) 32131 v2.AddArg(v3) 32132 v1.AddArg(v2) 32133 v0.AddArg(v1) 32134 v0.AddArg(y) 32135 return true 32136 } 32137 return false 32138 } 32139 func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { 32140 b := v.Block 32141 _ = b 32142 typ := &b.Func.Config.Types 32143 _ = typ 32144 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 32145 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32146 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32147 for { 32148 _ = v.Args[1] 32149 or := v.Args[0] 32150 if or.Op != OpAMD64ORQ { 32151 break 32152 } 32153 _ = or.Args[1] 32154 y := 
or.Args[0] 32155 s1 := or.Args[1] 32156 if s1.Op != OpAMD64SHLQconst { 32157 break 32158 } 32159 j1 := s1.AuxInt 32160 r1 := s1.Args[0] 32161 if r1.Op != OpAMD64ROLWconst { 32162 break 32163 } 32164 if r1.AuxInt != 8 { 32165 break 32166 } 32167 x1 := r1.Args[0] 32168 if x1.Op != OpAMD64MOVWloadidx1 { 32169 break 32170 } 32171 i1 := x1.AuxInt 32172 s := x1.Aux 32173 _ = x1.Args[2] 32174 idx := x1.Args[0] 32175 p := x1.Args[1] 32176 mem := x1.Args[2] 32177 s0 := v.Args[1] 32178 if s0.Op != OpAMD64SHLQconst { 32179 break 32180 } 32181 j0 := s0.AuxInt 32182 r0 := s0.Args[0] 32183 if r0.Op != OpAMD64ROLWconst { 32184 break 32185 } 32186 if r0.AuxInt != 8 { 32187 break 32188 } 32189 x0 := r0.Args[0] 32190 if x0.Op != OpAMD64MOVWloadidx1 { 32191 break 32192 } 32193 i0 := x0.AuxInt 32194 if x0.Aux != s { 32195 break 32196 } 32197 _ = x0.Args[2] 32198 if idx != x0.Args[0] { 32199 break 32200 } 32201 if p != x0.Args[1] { 32202 break 32203 } 32204 if mem != x0.Args[2] { 32205 break 32206 } 32207 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32208 break 32209 } 32210 b = mergePoint(b, x0, x1) 32211 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32212 v.reset(OpCopy) 32213 v.AddArg(v0) 32214 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32215 v1.AuxInt = j1 32216 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32217 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32218 v3.AuxInt = i0 32219 v3.Aux = s 32220 v3.AddArg(p) 32221 v3.AddArg(idx) 32222 v3.AddArg(mem) 32223 v2.AddArg(v3) 32224 v1.AddArg(v2) 32225 v0.AddArg(v1) 32226 v0.AddArg(y) 32227 return true 32228 } 32229 // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) 32230 // cond: canMergeLoad(v, l, x) && clobber(l) 32231 // result: (ORQmem x [off] {sym} ptr mem) 32232 for { 32233 _ = v.Args[1] 32234 x := v.Args[0] 32235 l := v.Args[1] 32236 if l.Op != OpAMD64MOVQload { 32237 break 32238 } 32239 off := l.AuxInt 32240 sym := l.Aux 32241 _ = l.Args[1] 32242 ptr := l.Args[0] 32243 mem := l.Args[1] 32244 if !(canMergeLoad(v, l, x) && clobber(l)) { 32245 break 32246 } 32247 v.reset(OpAMD64ORQmem) 32248 v.AuxInt = off 32249 v.Aux = sym 32250 v.AddArg(x) 32251 v.AddArg(ptr) 32252 v.AddArg(mem) 32253 return true 32254 } 32255 // match: (ORQ l:(MOVQload [off] {sym} ptr mem) x) 32256 // cond: canMergeLoad(v, l, x) && clobber(l) 32257 // result: (ORQmem x [off] {sym} ptr mem) 32258 for { 32259 _ = v.Args[1] 32260 l := v.Args[0] 32261 if l.Op != OpAMD64MOVQload { 32262 break 32263 } 32264 off := l.AuxInt 32265 sym := l.Aux 32266 _ = l.Args[1] 32267 ptr := l.Args[0] 32268 mem := l.Args[1] 32269 x := v.Args[1] 32270 if !(canMergeLoad(v, l, x) && clobber(l)) { 32271 break 32272 } 32273 v.reset(OpAMD64ORQmem) 32274 v.AuxInt = off 32275 v.Aux = sym 32276 v.AddArg(x) 32277 v.AddArg(ptr) 32278 v.AddArg(mem) 32279 return true 32280 } 32281 return false 32282 } 32283 func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool { 32284 // match: (ORQconst [0] x) 32285 // cond: 32286 // result: x 32287 for { 32288 if v.AuxInt != 0 { 32289 break 32290 } 32291 x := v.Args[0] 32292 v.reset(OpCopy) 32293 v.Type = x.Type 32294 v.AddArg(x) 32295 return true 32296 } 32297 // match: (ORQconst [-1] _) 32298 // cond: 32299 // result: (MOVQconst [-1]) 32300 for { 32301 if v.AuxInt != -1 { 32302 break 32303 } 32304 
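// Editor's note: the two MOVQload rules above fold a 64-bit load feeding an
// ORQ into the instruction itself as a memory operand (ORQmem); canMergeLoad
// checks that the load can be absorbed safely and clobber(l) retires the
// standalone load. A hedged source-level sketch of the qualifying shape,
// names mine:
//
//	func orFromMem(x uint64, p *uint64) uint64 {
//		return x | *p // candidate for a single OR-with-memory-operand
//	}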
v.reset(OpAMD64MOVQconst) 32305 v.AuxInt = -1 32306 return true 32307 } 32308 // match: (ORQconst [c] (MOVQconst [d])) 32309 // cond: 32310 // result: (MOVQconst [c|d]) 32311 for { 32312 c := v.AuxInt 32313 v_0 := v.Args[0] 32314 if v_0.Op != OpAMD64MOVQconst { 32315 break 32316 } 32317 d := v_0.AuxInt 32318 v.reset(OpAMD64MOVQconst) 32319 v.AuxInt = c | d 32320 return true 32321 } 32322 return false 32323 } 32324 func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool { 32325 // match: (ROLB x (NEGQ y)) 32326 // cond: 32327 // result: (RORB x y) 32328 for { 32329 _ = v.Args[1] 32330 x := v.Args[0] 32331 v_1 := v.Args[1] 32332 if v_1.Op != OpAMD64NEGQ { 32333 break 32334 } 32335 y := v_1.Args[0] 32336 v.reset(OpAMD64RORB) 32337 v.AddArg(x) 32338 v.AddArg(y) 32339 return true 32340 } 32341 // match: (ROLB x (NEGL y)) 32342 // cond: 32343 // result: (RORB x y) 32344 for { 32345 _ = v.Args[1] 32346 x := v.Args[0] 32347 v_1 := v.Args[1] 32348 if v_1.Op != OpAMD64NEGL { 32349 break 32350 } 32351 y := v_1.Args[0] 32352 v.reset(OpAMD64RORB) 32353 v.AddArg(x) 32354 v.AddArg(y) 32355 return true 32356 } 32357 // match: (ROLB x (MOVQconst [c])) 32358 // cond: 32359 // result: (ROLBconst [c&7 ] x) 32360 for { 32361 _ = v.Args[1] 32362 x := v.Args[0] 32363 v_1 := v.Args[1] 32364 if v_1.Op != OpAMD64MOVQconst { 32365 break 32366 } 32367 c := v_1.AuxInt 32368 v.reset(OpAMD64ROLBconst) 32369 v.AuxInt = c & 7 32370 v.AddArg(x) 32371 return true 32372 } 32373 // match: (ROLB x (MOVLconst [c])) 32374 // cond: 32375 // result: (ROLBconst [c&7 ] x) 32376 for { 32377 _ = v.Args[1] 32378 x := v.Args[0] 32379 v_1 := v.Args[1] 32380 if v_1.Op != OpAMD64MOVLconst { 32381 break 32382 } 32383 c := v_1.AuxInt 32384 v.reset(OpAMD64ROLBconst) 32385 v.AuxInt = c & 7 32386 v.AddArg(x) 32387 return true 32388 } 32389 return false 32390 } 32391 func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool { 32392 // match: (ROLBconst [c] (ROLBconst [d] x)) 32393 // cond: 32394 // result: (ROLBconst [(c+d)& 7] x) 32395 for { 32396 c := v.AuxInt 32397 v_0 := v.Args[0] 32398 if v_0.Op != OpAMD64ROLBconst { 32399 break 32400 } 32401 d := v_0.AuxInt 32402 x := v_0.Args[0] 32403 v.reset(OpAMD64ROLBconst) 32404 v.AuxInt = (c + d) & 7 32405 v.AddArg(x) 32406 return true 32407 } 32408 // match: (ROLBconst x [0]) 32409 // cond: 32410 // result: x 32411 for { 32412 if v.AuxInt != 0 { 32413 break 32414 } 32415 x := v.Args[0] 32416 v.reset(OpCopy) 32417 v.Type = x.Type 32418 v.AddArg(x) 32419 return true 32420 } 32421 return false 32422 } 32423 func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool { 32424 // match: (ROLL x (NEGQ y)) 32425 // cond: 32426 // result: (RORL x y) 32427 for { 32428 _ = v.Args[1] 32429 x := v.Args[0] 32430 v_1 := v.Args[1] 32431 if v_1.Op != OpAMD64NEGQ { 32432 break 32433 } 32434 y := v_1.Args[0] 32435 v.reset(OpAMD64RORL) 32436 v.AddArg(x) 32437 v.AddArg(y) 32438 return true 32439 } 32440 // match: (ROLL x (NEGL y)) 32441 // cond: 32442 // result: (RORL x y) 32443 for { 32444 _ = v.Args[1] 32445 x := v.Args[0] 32446 v_1 := v.Args[1] 32447 if v_1.Op != OpAMD64NEGL { 32448 break 32449 } 32450 y := v_1.Args[0] 32451 v.reset(OpAMD64RORL) 32452 v.AddArg(x) 32453 v.AddArg(y) 32454 return true 32455 } 32456 // match: (ROLL x (MOVQconst [c])) 32457 // cond: 32458 // result: (ROLLconst [c&31] x) 32459 for { 32460 _ = v.Args[1] 32461 x := v.Args[0] 32462 v_1 := v.Args[1] 32463 if v_1.Op != OpAMD64MOVQconst { 32464 break 32465 } 32466 c := v_1.AuxInt 32467 v.reset(OpAMD64ROLLconst) 32468 v.AuxInt = c & 31 32469 v.AddArg(x) 32470 
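// Editor's note: the 8-bit rotate rules above reduce a constant count mod 8
// (AuxInt = c & 7), flip a negated variable count to the opposite direction
// (ROLB x (NEGQ y) -> RORB x y), and fuse nested constant rotates:
// (ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)&7] x), with rotate
// by 0 simplifying to x. The same algebra at the Go level, assuming
// "math/bits" is imported (a sketch, not these rules themselves):
//
//	func rotByteTwice(x uint8, c, d int) uint8 {
//		// equals bits.RotateLeft8(x, (c+d)&7)
//		return bits.RotateLeft8(bits.RotateLeft8(x, d), c)
//	}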
return true 32471 } 32472 // match: (ROLL x (MOVLconst [c])) 32473 // cond: 32474 // result: (ROLLconst [c&31] x) 32475 for { 32476 _ = v.Args[1] 32477 x := v.Args[0] 32478 v_1 := v.Args[1] 32479 if v_1.Op != OpAMD64MOVLconst { 32480 break 32481 } 32482 c := v_1.AuxInt 32483 v.reset(OpAMD64ROLLconst) 32484 v.AuxInt = c & 31 32485 v.AddArg(x) 32486 return true 32487 } 32488 return false 32489 } 32490 func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool { 32491 // match: (ROLLconst [c] (ROLLconst [d] x)) 32492 // cond: 32493 // result: (ROLLconst [(c+d)&31] x) 32494 for { 32495 c := v.AuxInt 32496 v_0 := v.Args[0] 32497 if v_0.Op != OpAMD64ROLLconst { 32498 break 32499 } 32500 d := v_0.AuxInt 32501 x := v_0.Args[0] 32502 v.reset(OpAMD64ROLLconst) 32503 v.AuxInt = (c + d) & 31 32504 v.AddArg(x) 32505 return true 32506 } 32507 // match: (ROLLconst x [0]) 32508 // cond: 32509 // result: x 32510 for { 32511 if v.AuxInt != 0 { 32512 break 32513 } 32514 x := v.Args[0] 32515 v.reset(OpCopy) 32516 v.Type = x.Type 32517 v.AddArg(x) 32518 return true 32519 } 32520 return false 32521 } 32522 func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool { 32523 // match: (ROLQ x (NEGQ y)) 32524 // cond: 32525 // result: (RORQ x y) 32526 for { 32527 _ = v.Args[1] 32528 x := v.Args[0] 32529 v_1 := v.Args[1] 32530 if v_1.Op != OpAMD64NEGQ { 32531 break 32532 } 32533 y := v_1.Args[0] 32534 v.reset(OpAMD64RORQ) 32535 v.AddArg(x) 32536 v.AddArg(y) 32537 return true 32538 } 32539 // match: (ROLQ x (NEGL y)) 32540 // cond: 32541 // result: (RORQ x y) 32542 for { 32543 _ = v.Args[1] 32544 x := v.Args[0] 32545 v_1 := v.Args[1] 32546 if v_1.Op != OpAMD64NEGL { 32547 break 32548 } 32549 y := v_1.Args[0] 32550 v.reset(OpAMD64RORQ) 32551 v.AddArg(x) 32552 v.AddArg(y) 32553 return true 32554 } 32555 // match: (ROLQ x (MOVQconst [c])) 32556 // cond: 32557 // result: (ROLQconst [c&63] x) 32558 for { 32559 _ = v.Args[1] 32560 x := v.Args[0] 32561 v_1 := v.Args[1] 32562 if v_1.Op != OpAMD64MOVQconst { 32563 break 32564 } 32565 c := v_1.AuxInt 32566 v.reset(OpAMD64ROLQconst) 32567 v.AuxInt = c & 63 32568 v.AddArg(x) 32569 return true 32570 } 32571 // match: (ROLQ x (MOVLconst [c])) 32572 // cond: 32573 // result: (ROLQconst [c&63] x) 32574 for { 32575 _ = v.Args[1] 32576 x := v.Args[0] 32577 v_1 := v.Args[1] 32578 if v_1.Op != OpAMD64MOVLconst { 32579 break 32580 } 32581 c := v_1.AuxInt 32582 v.reset(OpAMD64ROLQconst) 32583 v.AuxInt = c & 63 32584 v.AddArg(x) 32585 return true 32586 } 32587 return false 32588 } 32589 func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool { 32590 // match: (ROLQconst [c] (ROLQconst [d] x)) 32591 // cond: 32592 // result: (ROLQconst [(c+d)&63] x) 32593 for { 32594 c := v.AuxInt 32595 v_0 := v.Args[0] 32596 if v_0.Op != OpAMD64ROLQconst { 32597 break 32598 } 32599 d := v_0.AuxInt 32600 x := v_0.Args[0] 32601 v.reset(OpAMD64ROLQconst) 32602 v.AuxInt = (c + d) & 63 32603 v.AddArg(x) 32604 return true 32605 } 32606 // match: (ROLQconst x [0]) 32607 // cond: 32608 // result: x 32609 for { 32610 if v.AuxInt != 0 { 32611 break 32612 } 32613 x := v.Args[0] 32614 v.reset(OpCopy) 32615 v.Type = x.Type 32616 v.AddArg(x) 32617 return true 32618 } 32619 return false 32620 } 32621 func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool { 32622 // match: (ROLW x (NEGQ y)) 32623 // cond: 32624 // result: (RORW x y) 32625 for { 32626 _ = v.Args[1] 32627 x := v.Args[0] 32628 v_1 := v.Args[1] 32629 if v_1.Op != OpAMD64NEGQ { 32630 break 32631 } 32632 y := v_1.Args[0] 32633 v.reset(OpAMD64RORW) 32634 v.AddArg(x) 32635 
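// Editor's note: the 32- and 64-bit rotates above follow the same scheme at
// their widths: constant counts become ROLLconst/ROLQconst masked by 31 or
// 63, nested constant rotates add mod the width, and rotate by 0 disappears.
// Sketch, assuming "math/bits":
//
//	func rotOversized(x uint32) uint32 {
//		return bits.RotateLeft32(x, 37) // same result as rotating by 37&31 == 5
//	}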
v.AddArg(y) 32636 return true 32637 } 32638 // match: (ROLW x (NEGL y)) 32639 // cond: 32640 // result: (RORW x y) 32641 for { 32642 _ = v.Args[1] 32643 x := v.Args[0] 32644 v_1 := v.Args[1] 32645 if v_1.Op != OpAMD64NEGL { 32646 break 32647 } 32648 y := v_1.Args[0] 32649 v.reset(OpAMD64RORW) 32650 v.AddArg(x) 32651 v.AddArg(y) 32652 return true 32653 } 32654 // match: (ROLW x (MOVQconst [c])) 32655 // cond: 32656 // result: (ROLWconst [c&15] x) 32657 for { 32658 _ = v.Args[1] 32659 x := v.Args[0] 32660 v_1 := v.Args[1] 32661 if v_1.Op != OpAMD64MOVQconst { 32662 break 32663 } 32664 c := v_1.AuxInt 32665 v.reset(OpAMD64ROLWconst) 32666 v.AuxInt = c & 15 32667 v.AddArg(x) 32668 return true 32669 } 32670 // match: (ROLW x (MOVLconst [c])) 32671 // cond: 32672 // result: (ROLWconst [c&15] x) 32673 for { 32674 _ = v.Args[1] 32675 x := v.Args[0] 32676 v_1 := v.Args[1] 32677 if v_1.Op != OpAMD64MOVLconst { 32678 break 32679 } 32680 c := v_1.AuxInt 32681 v.reset(OpAMD64ROLWconst) 32682 v.AuxInt = c & 15 32683 v.AddArg(x) 32684 return true 32685 } 32686 return false 32687 } 32688 func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool { 32689 // match: (ROLWconst [c] (ROLWconst [d] x)) 32690 // cond: 32691 // result: (ROLWconst [(c+d)&15] x) 32692 for { 32693 c := v.AuxInt 32694 v_0 := v.Args[0] 32695 if v_0.Op != OpAMD64ROLWconst { 32696 break 32697 } 32698 d := v_0.AuxInt 32699 x := v_0.Args[0] 32700 v.reset(OpAMD64ROLWconst) 32701 v.AuxInt = (c + d) & 15 32702 v.AddArg(x) 32703 return true 32704 } 32705 // match: (ROLWconst x [0]) 32706 // cond: 32707 // result: x 32708 for { 32709 if v.AuxInt != 0 { 32710 break 32711 } 32712 x := v.Args[0] 32713 v.reset(OpCopy) 32714 v.Type = x.Type 32715 v.AddArg(x) 32716 return true 32717 } 32718 return false 32719 } 32720 func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool { 32721 // match: (RORB x (NEGQ y)) 32722 // cond: 32723 // result: (ROLB x y) 32724 for { 32725 _ = v.Args[1] 32726 x := v.Args[0] 32727 v_1 := v.Args[1] 32728 if v_1.Op != OpAMD64NEGQ { 32729 break 32730 } 32731 y := v_1.Args[0] 32732 v.reset(OpAMD64ROLB) 32733 v.AddArg(x) 32734 v.AddArg(y) 32735 return true 32736 } 32737 // match: (RORB x (NEGL y)) 32738 // cond: 32739 // result: (ROLB x y) 32740 for { 32741 _ = v.Args[1] 32742 x := v.Args[0] 32743 v_1 := v.Args[1] 32744 if v_1.Op != OpAMD64NEGL { 32745 break 32746 } 32747 y := v_1.Args[0] 32748 v.reset(OpAMD64ROLB) 32749 v.AddArg(x) 32750 v.AddArg(y) 32751 return true 32752 } 32753 // match: (RORB x (MOVQconst [c])) 32754 // cond: 32755 // result: (ROLBconst [(-c)&7 ] x) 32756 for { 32757 _ = v.Args[1] 32758 x := v.Args[0] 32759 v_1 := v.Args[1] 32760 if v_1.Op != OpAMD64MOVQconst { 32761 break 32762 } 32763 c := v_1.AuxInt 32764 v.reset(OpAMD64ROLBconst) 32765 v.AuxInt = (-c) & 7 32766 v.AddArg(x) 32767 return true 32768 } 32769 // match: (RORB x (MOVLconst [c])) 32770 // cond: 32771 // result: (ROLBconst [(-c)&7 ] x) 32772 for { 32773 _ = v.Args[1] 32774 x := v.Args[0] 32775 v_1 := v.Args[1] 32776 if v_1.Op != OpAMD64MOVLconst { 32777 break 32778 } 32779 c := v_1.AuxInt 32780 v.reset(OpAMD64ROLBconst) 32781 v.AuxInt = (-c) & 7 32782 v.AddArg(x) 32783 return true 32784 } 32785 return false 32786 } 32787 func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool { 32788 // match: (RORL x (NEGQ y)) 32789 // cond: 32790 // result: (ROLL x y) 32791 for { 32792 _ = v.Args[1] 32793 x := v.Args[0] 32794 v_1 := v.Args[1] 32795 if v_1.Op != OpAMD64NEGQ { 32796 break 32797 } 32798 y := v_1.Args[0] 32799 v.reset(OpAMD64ROLL) 32800 v.AddArg(x) 32801 
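// Editor's note: there is no rotate-right-by-constant form here; RORB with a
// constant is re-expressed as a left rotate by the negated count,
// (RORB x (MOVQconst [c])) -> (ROLBconst [(-c)&7] x), and a right rotate by
// a negated variable count turns back into ROLB. The identity, as a sketch
// assuming "math/bits" (negative counts rotate right):
//
//	func ror8(x uint8, c int) uint8 {
//		return bits.RotateLeft8(x, -c) // rotate right by c
//	}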
v.AddArg(y) 32802 return true 32803 } 32804 // match: (RORL x (NEGL y)) 32805 // cond: 32806 // result: (ROLL x y) 32807 for { 32808 _ = v.Args[1] 32809 x := v.Args[0] 32810 v_1 := v.Args[1] 32811 if v_1.Op != OpAMD64NEGL { 32812 break 32813 } 32814 y := v_1.Args[0] 32815 v.reset(OpAMD64ROLL) 32816 v.AddArg(x) 32817 v.AddArg(y) 32818 return true 32819 } 32820 // match: (RORL x (MOVQconst [c])) 32821 // cond: 32822 // result: (ROLLconst [(-c)&31] x) 32823 for { 32824 _ = v.Args[1] 32825 x := v.Args[0] 32826 v_1 := v.Args[1] 32827 if v_1.Op != OpAMD64MOVQconst { 32828 break 32829 } 32830 c := v_1.AuxInt 32831 v.reset(OpAMD64ROLLconst) 32832 v.AuxInt = (-c) & 31 32833 v.AddArg(x) 32834 return true 32835 } 32836 // match: (RORL x (MOVLconst [c])) 32837 // cond: 32838 // result: (ROLLconst [(-c)&31] x) 32839 for { 32840 _ = v.Args[1] 32841 x := v.Args[0] 32842 v_1 := v.Args[1] 32843 if v_1.Op != OpAMD64MOVLconst { 32844 break 32845 } 32846 c := v_1.AuxInt 32847 v.reset(OpAMD64ROLLconst) 32848 v.AuxInt = (-c) & 31 32849 v.AddArg(x) 32850 return true 32851 } 32852 return false 32853 } 32854 func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool { 32855 // match: (RORQ x (NEGQ y)) 32856 // cond: 32857 // result: (ROLQ x y) 32858 for { 32859 _ = v.Args[1] 32860 x := v.Args[0] 32861 v_1 := v.Args[1] 32862 if v_1.Op != OpAMD64NEGQ { 32863 break 32864 } 32865 y := v_1.Args[0] 32866 v.reset(OpAMD64ROLQ) 32867 v.AddArg(x) 32868 v.AddArg(y) 32869 return true 32870 } 32871 // match: (RORQ x (NEGL y)) 32872 // cond: 32873 // result: (ROLQ x y) 32874 for { 32875 _ = v.Args[1] 32876 x := v.Args[0] 32877 v_1 := v.Args[1] 32878 if v_1.Op != OpAMD64NEGL { 32879 break 32880 } 32881 y := v_1.Args[0] 32882 v.reset(OpAMD64ROLQ) 32883 v.AddArg(x) 32884 v.AddArg(y) 32885 return true 32886 } 32887 // match: (RORQ x (MOVQconst [c])) 32888 // cond: 32889 // result: (ROLQconst [(-c)&63] x) 32890 for { 32891 _ = v.Args[1] 32892 x := v.Args[0] 32893 v_1 := v.Args[1] 32894 if v_1.Op != OpAMD64MOVQconst { 32895 break 32896 } 32897 c := v_1.AuxInt 32898 v.reset(OpAMD64ROLQconst) 32899 v.AuxInt = (-c) & 63 32900 v.AddArg(x) 32901 return true 32902 } 32903 // match: (RORQ x (MOVLconst [c])) 32904 // cond: 32905 // result: (ROLQconst [(-c)&63] x) 32906 for { 32907 _ = v.Args[1] 32908 x := v.Args[0] 32909 v_1 := v.Args[1] 32910 if v_1.Op != OpAMD64MOVLconst { 32911 break 32912 } 32913 c := v_1.AuxInt 32914 v.reset(OpAMD64ROLQconst) 32915 v.AuxInt = (-c) & 63 32916 v.AddArg(x) 32917 return true 32918 } 32919 return false 32920 } 32921 func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool { 32922 // match: (RORW x (NEGQ y)) 32923 // cond: 32924 // result: (ROLW x y) 32925 for { 32926 _ = v.Args[1] 32927 x := v.Args[0] 32928 v_1 := v.Args[1] 32929 if v_1.Op != OpAMD64NEGQ { 32930 break 32931 } 32932 y := v_1.Args[0] 32933 v.reset(OpAMD64ROLW) 32934 v.AddArg(x) 32935 v.AddArg(y) 32936 return true 32937 } 32938 // match: (RORW x (NEGL y)) 32939 // cond: 32940 // result: (ROLW x y) 32941 for { 32942 _ = v.Args[1] 32943 x := v.Args[0] 32944 v_1 := v.Args[1] 32945 if v_1.Op != OpAMD64NEGL { 32946 break 32947 } 32948 y := v_1.Args[0] 32949 v.reset(OpAMD64ROLW) 32950 v.AddArg(x) 32951 v.AddArg(y) 32952 return true 32953 } 32954 // match: (RORW x (MOVQconst [c])) 32955 // cond: 32956 // result: (ROLWconst [(-c)&15] x) 32957 for { 32958 _ = v.Args[1] 32959 x := v.Args[0] 32960 v_1 := v.Args[1] 32961 if v_1.Op != OpAMD64MOVQconst { 32962 break 32963 } 32964 c := v_1.AuxInt 32965 v.reset(OpAMD64ROLWconst) 32966 v.AuxInt = (-c) & 15 32967 
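// Editor's note: RORL, RORQ, and RORW above repeat the RORB scheme at their
// widths: a negated variable count flips back to the matching ROL op, and a
// constant count becomes ROLxconst with (-c) masked by 31, 63, or 15.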
v.AddArg(x) 32968 return true 32969 } 32970 // match: (RORW x (MOVLconst [c])) 32971 // cond: 32972 // result: (ROLWconst [(-c)&15] x) 32973 for { 32974 _ = v.Args[1] 32975 x := v.Args[0] 32976 v_1 := v.Args[1] 32977 if v_1.Op != OpAMD64MOVLconst { 32978 break 32979 } 32980 c := v_1.AuxInt 32981 v.reset(OpAMD64ROLWconst) 32982 v.AuxInt = (-c) & 15 32983 v.AddArg(x) 32984 return true 32985 } 32986 return false 32987 } 32988 func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool { 32989 // match: (SARB x (MOVQconst [c])) 32990 // cond: 32991 // result: (SARBconst [min(c&31,7)] x) 32992 for { 32993 _ = v.Args[1] 32994 x := v.Args[0] 32995 v_1 := v.Args[1] 32996 if v_1.Op != OpAMD64MOVQconst { 32997 break 32998 } 32999 c := v_1.AuxInt 33000 v.reset(OpAMD64SARBconst) 33001 v.AuxInt = min(c&31, 7) 33002 v.AddArg(x) 33003 return true 33004 } 33005 // match: (SARB x (MOVLconst [c])) 33006 // cond: 33007 // result: (SARBconst [min(c&31,7)] x) 33008 for { 33009 _ = v.Args[1] 33010 x := v.Args[0] 33011 v_1 := v.Args[1] 33012 if v_1.Op != OpAMD64MOVLconst { 33013 break 33014 } 33015 c := v_1.AuxInt 33016 v.reset(OpAMD64SARBconst) 33017 v.AuxInt = min(c&31, 7) 33018 v.AddArg(x) 33019 return true 33020 } 33021 return false 33022 } 33023 func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool { 33024 // match: (SARBconst x [0]) 33025 // cond: 33026 // result: x 33027 for { 33028 if v.AuxInt != 0 { 33029 break 33030 } 33031 x := v.Args[0] 33032 v.reset(OpCopy) 33033 v.Type = x.Type 33034 v.AddArg(x) 33035 return true 33036 } 33037 // match: (SARBconst [c] (MOVQconst [d])) 33038 // cond: 33039 // result: (MOVQconst [d>>uint64(c)]) 33040 for { 33041 c := v.AuxInt 33042 v_0 := v.Args[0] 33043 if v_0.Op != OpAMD64MOVQconst { 33044 break 33045 } 33046 d := v_0.AuxInt 33047 v.reset(OpAMD64MOVQconst) 33048 v.AuxInt = d >> uint64(c) 33049 return true 33050 } 33051 return false 33052 } 33053 func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { 33054 b := v.Block 33055 _ = b 33056 // match: (SARL x (MOVQconst [c])) 33057 // cond: 33058 // result: (SARLconst [c&31] x) 33059 for { 33060 _ = v.Args[1] 33061 x := v.Args[0] 33062 v_1 := v.Args[1] 33063 if v_1.Op != OpAMD64MOVQconst { 33064 break 33065 } 33066 c := v_1.AuxInt 33067 v.reset(OpAMD64SARLconst) 33068 v.AuxInt = c & 31 33069 v.AddArg(x) 33070 return true 33071 } 33072 // match: (SARL x (MOVLconst [c])) 33073 // cond: 33074 // result: (SARLconst [c&31] x) 33075 for { 33076 _ = v.Args[1] 33077 x := v.Args[0] 33078 v_1 := v.Args[1] 33079 if v_1.Op != OpAMD64MOVLconst { 33080 break 33081 } 33082 c := v_1.AuxInt 33083 v.reset(OpAMD64SARLconst) 33084 v.AuxInt = c & 31 33085 v.AddArg(x) 33086 return true 33087 } 33088 // match: (SARL x (ADDQconst [c] y)) 33089 // cond: c & 31 == 0 33090 // result: (SARL x y) 33091 for { 33092 _ = v.Args[1] 33093 x := v.Args[0] 33094 v_1 := v.Args[1] 33095 if v_1.Op != OpAMD64ADDQconst { 33096 break 33097 } 33098 c := v_1.AuxInt 33099 y := v_1.Args[0] 33100 if !(c&31 == 0) { 33101 break 33102 } 33103 v.reset(OpAMD64SARL) 33104 v.AddArg(x) 33105 v.AddArg(y) 33106 return true 33107 } 33108 // match: (SARL x (NEGQ <t> (ADDQconst [c] y))) 33109 // cond: c & 31 == 0 33110 // result: (SARL x (NEGQ <t> y)) 33111 for { 33112 _ = v.Args[1] 33113 x := v.Args[0] 33114 v_1 := v.Args[1] 33115 if v_1.Op != OpAMD64NEGQ { 33116 break 33117 } 33118 t := v_1.Type 33119 v_1_0 := v_1.Args[0] 33120 if v_1_0.Op != OpAMD64ADDQconst { 33121 break 33122 } 33123 c := v_1_0.AuxInt 33124 y := v_1_0.Args[0] 33125 if !(c&31 == 0) { 33126 break 33127 } 33128 
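// Editor's note: SARB and SARW above clamp the constant count with
// min(c&31, 7) and min(c&31, 15): x86 masks shift counts at these widths to
// 5 bits, and an arithmetic shift of an 8- or 16-bit value by width-1 already
// yields all sign bits, so larger counts can be pinned there. Sketch:
//
//	func sar8(x int8, c uint) int8 {
//		if c > 7 {
//			c = 7 // result is already 0 or -1 at count 7
//		}
//		return x >> c
//	}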
v.reset(OpAMD64SARL) 33129 v.AddArg(x) 33130 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33131 v0.AddArg(y) 33132 v.AddArg(v0) 33133 return true 33134 } 33135 // match: (SARL x (ANDQconst [c] y)) 33136 // cond: c & 31 == 31 33137 // result: (SARL x y) 33138 for { 33139 _ = v.Args[1] 33140 x := v.Args[0] 33141 v_1 := v.Args[1] 33142 if v_1.Op != OpAMD64ANDQconst { 33143 break 33144 } 33145 c := v_1.AuxInt 33146 y := v_1.Args[0] 33147 if !(c&31 == 31) { 33148 break 33149 } 33150 v.reset(OpAMD64SARL) 33151 v.AddArg(x) 33152 v.AddArg(y) 33153 return true 33154 } 33155 // match: (SARL x (NEGQ <t> (ANDQconst [c] y))) 33156 // cond: c & 31 == 31 33157 // result: (SARL x (NEGQ <t> y)) 33158 for { 33159 _ = v.Args[1] 33160 x := v.Args[0] 33161 v_1 := v.Args[1] 33162 if v_1.Op != OpAMD64NEGQ { 33163 break 33164 } 33165 t := v_1.Type 33166 v_1_0 := v_1.Args[0] 33167 if v_1_0.Op != OpAMD64ANDQconst { 33168 break 33169 } 33170 c := v_1_0.AuxInt 33171 y := v_1_0.Args[0] 33172 if !(c&31 == 31) { 33173 break 33174 } 33175 v.reset(OpAMD64SARL) 33176 v.AddArg(x) 33177 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33178 v0.AddArg(y) 33179 v.AddArg(v0) 33180 return true 33181 } 33182 // match: (SARL x (ADDLconst [c] y)) 33183 // cond: c & 31 == 0 33184 // result: (SARL x y) 33185 for { 33186 _ = v.Args[1] 33187 x := v.Args[0] 33188 v_1 := v.Args[1] 33189 if v_1.Op != OpAMD64ADDLconst { 33190 break 33191 } 33192 c := v_1.AuxInt 33193 y := v_1.Args[0] 33194 if !(c&31 == 0) { 33195 break 33196 } 33197 v.reset(OpAMD64SARL) 33198 v.AddArg(x) 33199 v.AddArg(y) 33200 return true 33201 } 33202 // match: (SARL x (NEGL <t> (ADDLconst [c] y))) 33203 // cond: c & 31 == 0 33204 // result: (SARL x (NEGL <t> y)) 33205 for { 33206 _ = v.Args[1] 33207 x := v.Args[0] 33208 v_1 := v.Args[1] 33209 if v_1.Op != OpAMD64NEGL { 33210 break 33211 } 33212 t := v_1.Type 33213 v_1_0 := v_1.Args[0] 33214 if v_1_0.Op != OpAMD64ADDLconst { 33215 break 33216 } 33217 c := v_1_0.AuxInt 33218 y := v_1_0.Args[0] 33219 if !(c&31 == 0) { 33220 break 33221 } 33222 v.reset(OpAMD64SARL) 33223 v.AddArg(x) 33224 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 33225 v0.AddArg(y) 33226 v.AddArg(v0) 33227 return true 33228 } 33229 // match: (SARL x (ANDLconst [c] y)) 33230 // cond: c & 31 == 31 33231 // result: (SARL x y) 33232 for { 33233 _ = v.Args[1] 33234 x := v.Args[0] 33235 v_1 := v.Args[1] 33236 if v_1.Op != OpAMD64ANDLconst { 33237 break 33238 } 33239 c := v_1.AuxInt 33240 y := v_1.Args[0] 33241 if !(c&31 == 31) { 33242 break 33243 } 33244 v.reset(OpAMD64SARL) 33245 v.AddArg(x) 33246 v.AddArg(y) 33247 return true 33248 } 33249 // match: (SARL x (NEGL <t> (ANDLconst [c] y))) 33250 // cond: c & 31 == 31 33251 // result: (SARL x (NEGL <t> y)) 33252 for { 33253 _ = v.Args[1] 33254 x := v.Args[0] 33255 v_1 := v.Args[1] 33256 if v_1.Op != OpAMD64NEGL { 33257 break 33258 } 33259 t := v_1.Type 33260 v_1_0 := v_1.Args[0] 33261 if v_1_0.Op != OpAMD64ANDLconst { 33262 break 33263 } 33264 c := v_1_0.AuxInt 33265 y := v_1_0.Args[0] 33266 if !(c&31 == 31) { 33267 break 33268 } 33269 v.reset(OpAMD64SARL) 33270 v.AddArg(x) 33271 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 33272 v0.AddArg(y) 33273 v.AddArg(v0) 33274 return true 33275 } 33276 return false 33277 } 33278 func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool { 33279 // match: (SARLconst x [0]) 33280 // cond: 33281 // result: x 33282 for { 33283 if v.AuxInt != 0 { 33284 break 33285 } 33286 x := v.Args[0] 33287 v.reset(OpCopy) 33288 v.Type = x.Type 33289 v.AddArg(x) 33290 return true 33291 } 33292 // match: 
(SARLconst [c] (MOVQconst [d])) 33293 // cond: 33294 // result: (MOVQconst [d>>uint64(c)]) 33295 for { 33296 c := v.AuxInt 33297 v_0 := v.Args[0] 33298 if v_0.Op != OpAMD64MOVQconst { 33299 break 33300 } 33301 d := v_0.AuxInt 33302 v.reset(OpAMD64MOVQconst) 33303 v.AuxInt = d >> uint64(c) 33304 return true 33305 } 33306 return false 33307 } 33308 func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { 33309 b := v.Block 33310 _ = b 33311 // match: (SARQ x (MOVQconst [c])) 33312 // cond: 33313 // result: (SARQconst [c&63] x) 33314 for { 33315 _ = v.Args[1] 33316 x := v.Args[0] 33317 v_1 := v.Args[1] 33318 if v_1.Op != OpAMD64MOVQconst { 33319 break 33320 } 33321 c := v_1.AuxInt 33322 v.reset(OpAMD64SARQconst) 33323 v.AuxInt = c & 63 33324 v.AddArg(x) 33325 return true 33326 } 33327 // match: (SARQ x (MOVLconst [c])) 33328 // cond: 33329 // result: (SARQconst [c&63] x) 33330 for { 33331 _ = v.Args[1] 33332 x := v.Args[0] 33333 v_1 := v.Args[1] 33334 if v_1.Op != OpAMD64MOVLconst { 33335 break 33336 } 33337 c := v_1.AuxInt 33338 v.reset(OpAMD64SARQconst) 33339 v.AuxInt = c & 63 33340 v.AddArg(x) 33341 return true 33342 } 33343 // match: (SARQ x (ADDQconst [c] y)) 33344 // cond: c & 63 == 0 33345 // result: (SARQ x y) 33346 for { 33347 _ = v.Args[1] 33348 x := v.Args[0] 33349 v_1 := v.Args[1] 33350 if v_1.Op != OpAMD64ADDQconst { 33351 break 33352 } 33353 c := v_1.AuxInt 33354 y := v_1.Args[0] 33355 if !(c&63 == 0) { 33356 break 33357 } 33358 v.reset(OpAMD64SARQ) 33359 v.AddArg(x) 33360 v.AddArg(y) 33361 return true 33362 } 33363 // match: (SARQ x (NEGQ <t> (ADDQconst [c] y))) 33364 // cond: c & 63 == 0 33365 // result: (SARQ x (NEGQ <t> y)) 33366 for { 33367 _ = v.Args[1] 33368 x := v.Args[0] 33369 v_1 := v.Args[1] 33370 if v_1.Op != OpAMD64NEGQ { 33371 break 33372 } 33373 t := v_1.Type 33374 v_1_0 := v_1.Args[0] 33375 if v_1_0.Op != OpAMD64ADDQconst { 33376 break 33377 } 33378 c := v_1_0.AuxInt 33379 y := v_1_0.Args[0] 33380 if !(c&63 == 0) { 33381 break 33382 } 33383 v.reset(OpAMD64SARQ) 33384 v.AddArg(x) 33385 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33386 v0.AddArg(y) 33387 v.AddArg(v0) 33388 return true 33389 } 33390 // match: (SARQ x (ANDQconst [c] y)) 33391 // cond: c & 63 == 63 33392 // result: (SARQ x y) 33393 for { 33394 _ = v.Args[1] 33395 x := v.Args[0] 33396 v_1 := v.Args[1] 33397 if v_1.Op != OpAMD64ANDQconst { 33398 break 33399 } 33400 c := v_1.AuxInt 33401 y := v_1.Args[0] 33402 if !(c&63 == 63) { 33403 break 33404 } 33405 v.reset(OpAMD64SARQ) 33406 v.AddArg(x) 33407 v.AddArg(y) 33408 return true 33409 } 33410 // match: (SARQ x (NEGQ <t> (ANDQconst [c] y))) 33411 // cond: c & 63 == 63 33412 // result: (SARQ x (NEGQ <t> y)) 33413 for { 33414 _ = v.Args[1] 33415 x := v.Args[0] 33416 v_1 := v.Args[1] 33417 if v_1.Op != OpAMD64NEGQ { 33418 break 33419 } 33420 t := v_1.Type 33421 v_1_0 := v_1.Args[0] 33422 if v_1_0.Op != OpAMD64ANDQconst { 33423 break 33424 } 33425 c := v_1_0.AuxInt 33426 y := v_1_0.Args[0] 33427 if !(c&63 == 63) { 33428 break 33429 } 33430 v.reset(OpAMD64SARQ) 33431 v.AddArg(x) 33432 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 33433 v0.AddArg(y) 33434 v.AddArg(v0) 33435 return true 33436 } 33437 // match: (SARQ x (ADDLconst [c] y)) 33438 // cond: c & 63 == 0 33439 // result: (SARQ x y) 33440 for { 33441 _ = v.Args[1] 33442 x := v.Args[0] 33443 v_1 := v.Args[1] 33444 if v_1.Op != OpAMD64ADDLconst { 33445 break 33446 } 33447 c := v_1.AuxInt 33448 y := v_1.Args[0] 33449 if !(c&63 == 0) { 33450 break 33451 } 33452 v.reset(OpAMD64SARQ) 33453 v.AddArg(x) 33454 v.AddArg(y) 
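// Editor's note: since SARL and SARQ only consume the low 5 or 6 bits of the
// count, the rules above delete count arithmetic that cannot change those
// bits: adding a multiple of the width (c&63 == 0) is dropped, and masking
// with width-1 (c&63 == 63) is dropped, including when wrapped in NEGQ/NEGL.
// Sketch of the mask case:
//
//	func sarMasked(x int64, n uint) int64 {
//		return x >> (n & 63) // explicit mask lets a bare SARQ be emitted
//	}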
33455 return true 33456 } 33457 // match: (SARQ x (NEGL <t> (ADDLconst [c] y))) 33458 // cond: c & 63 == 0 33459 // result: (SARQ x (NEGL <t> y)) 33460 for { 33461 _ = v.Args[1] 33462 x := v.Args[0] 33463 v_1 := v.Args[1] 33464 if v_1.Op != OpAMD64NEGL { 33465 break 33466 } 33467 t := v_1.Type 33468 v_1_0 := v_1.Args[0] 33469 if v_1_0.Op != OpAMD64ADDLconst { 33470 break 33471 } 33472 c := v_1_0.AuxInt 33473 y := v_1_0.Args[0] 33474 if !(c&63 == 0) { 33475 break 33476 } 33477 v.reset(OpAMD64SARQ) 33478 v.AddArg(x) 33479 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 33480 v0.AddArg(y) 33481 v.AddArg(v0) 33482 return true 33483 } 33484 // match: (SARQ x (ANDLconst [c] y)) 33485 // cond: c & 63 == 63 33486 // result: (SARQ x y) 33487 for { 33488 _ = v.Args[1] 33489 x := v.Args[0] 33490 v_1 := v.Args[1] 33491 if v_1.Op != OpAMD64ANDLconst { 33492 break 33493 } 33494 c := v_1.AuxInt 33495 y := v_1.Args[0] 33496 if !(c&63 == 63) { 33497 break 33498 } 33499 v.reset(OpAMD64SARQ) 33500 v.AddArg(x) 33501 v.AddArg(y) 33502 return true 33503 } 33504 // match: (SARQ x (NEGL <t> (ANDLconst [c] y))) 33505 // cond: c & 63 == 63 33506 // result: (SARQ x (NEGL <t> y)) 33507 for { 33508 _ = v.Args[1] 33509 x := v.Args[0] 33510 v_1 := v.Args[1] 33511 if v_1.Op != OpAMD64NEGL { 33512 break 33513 } 33514 t := v_1.Type 33515 v_1_0 := v_1.Args[0] 33516 if v_1_0.Op != OpAMD64ANDLconst { 33517 break 33518 } 33519 c := v_1_0.AuxInt 33520 y := v_1_0.Args[0] 33521 if !(c&63 == 63) { 33522 break 33523 } 33524 v.reset(OpAMD64SARQ) 33525 v.AddArg(x) 33526 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 33527 v0.AddArg(y) 33528 v.AddArg(v0) 33529 return true 33530 } 33531 return false 33532 } 33533 func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool { 33534 // match: (SARQconst x [0]) 33535 // cond: 33536 // result: x 33537 for { 33538 if v.AuxInt != 0 { 33539 break 33540 } 33541 x := v.Args[0] 33542 v.reset(OpCopy) 33543 v.Type = x.Type 33544 v.AddArg(x) 33545 return true 33546 } 33547 // match: (SARQconst [c] (MOVQconst [d])) 33548 // cond: 33549 // result: (MOVQconst [d>>uint64(c)]) 33550 for { 33551 c := v.AuxInt 33552 v_0 := v.Args[0] 33553 if v_0.Op != OpAMD64MOVQconst { 33554 break 33555 } 33556 d := v_0.AuxInt 33557 v.reset(OpAMD64MOVQconst) 33558 v.AuxInt = d >> uint64(c) 33559 return true 33560 } 33561 return false 33562 } 33563 func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool { 33564 // match: (SARW x (MOVQconst [c])) 33565 // cond: 33566 // result: (SARWconst [min(c&31,15)] x) 33567 for { 33568 _ = v.Args[1] 33569 x := v.Args[0] 33570 v_1 := v.Args[1] 33571 if v_1.Op != OpAMD64MOVQconst { 33572 break 33573 } 33574 c := v_1.AuxInt 33575 v.reset(OpAMD64SARWconst) 33576 v.AuxInt = min(c&31, 15) 33577 v.AddArg(x) 33578 return true 33579 } 33580 // match: (SARW x (MOVLconst [c])) 33581 // cond: 33582 // result: (SARWconst [min(c&31,15)] x) 33583 for { 33584 _ = v.Args[1] 33585 x := v.Args[0] 33586 v_1 := v.Args[1] 33587 if v_1.Op != OpAMD64MOVLconst { 33588 break 33589 } 33590 c := v_1.AuxInt 33591 v.reset(OpAMD64SARWconst) 33592 v.AuxInt = min(c&31, 15) 33593 v.AddArg(x) 33594 return true 33595 } 33596 return false 33597 } 33598 func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool { 33599 // match: (SARWconst x [0]) 33600 // cond: 33601 // result: x 33602 for { 33603 if v.AuxInt != 0 { 33604 break 33605 } 33606 x := v.Args[0] 33607 v.reset(OpCopy) 33608 v.Type = x.Type 33609 v.AddArg(x) 33610 return true 33611 } 33612 // match: (SARWconst [c] (MOVQconst [d])) 33613 // cond: 33614 // result: (MOVQconst 
[d>>uint64(c)]) 33615 for { 33616 c := v.AuxInt 33617 v_0 := v.Args[0] 33618 if v_0.Op != OpAMD64MOVQconst { 33619 break 33620 } 33621 d := v_0.AuxInt 33622 v.reset(OpAMD64MOVQconst) 33623 v.AuxInt = d >> uint64(c) 33624 return true 33625 } 33626 return false 33627 } 33628 func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { 33629 // match: (SBBLcarrymask (FlagEQ)) 33630 // cond: 33631 // result: (MOVLconst [0]) 33632 for { 33633 v_0 := v.Args[0] 33634 if v_0.Op != OpAMD64FlagEQ { 33635 break 33636 } 33637 v.reset(OpAMD64MOVLconst) 33638 v.AuxInt = 0 33639 return true 33640 } 33641 // match: (SBBLcarrymask (FlagLT_ULT)) 33642 // cond: 33643 // result: (MOVLconst [-1]) 33644 for { 33645 v_0 := v.Args[0] 33646 if v_0.Op != OpAMD64FlagLT_ULT { 33647 break 33648 } 33649 v.reset(OpAMD64MOVLconst) 33650 v.AuxInt = -1 33651 return true 33652 } 33653 // match: (SBBLcarrymask (FlagLT_UGT)) 33654 // cond: 33655 // result: (MOVLconst [0]) 33656 for { 33657 v_0 := v.Args[0] 33658 if v_0.Op != OpAMD64FlagLT_UGT { 33659 break 33660 } 33661 v.reset(OpAMD64MOVLconst) 33662 v.AuxInt = 0 33663 return true 33664 } 33665 // match: (SBBLcarrymask (FlagGT_ULT)) 33666 // cond: 33667 // result: (MOVLconst [-1]) 33668 for { 33669 v_0 := v.Args[0] 33670 if v_0.Op != OpAMD64FlagGT_ULT { 33671 break 33672 } 33673 v.reset(OpAMD64MOVLconst) 33674 v.AuxInt = -1 33675 return true 33676 } 33677 // match: (SBBLcarrymask (FlagGT_UGT)) 33678 // cond: 33679 // result: (MOVLconst [0]) 33680 for { 33681 v_0 := v.Args[0] 33682 if v_0.Op != OpAMD64FlagGT_UGT { 33683 break 33684 } 33685 v.reset(OpAMD64MOVLconst) 33686 v.AuxInt = 0 33687 return true 33688 } 33689 return false 33690 } 33691 func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { 33692 // match: (SBBQcarrymask (FlagEQ)) 33693 // cond: 33694 // result: (MOVQconst [0]) 33695 for { 33696 v_0 := v.Args[0] 33697 if v_0.Op != OpAMD64FlagEQ { 33698 break 33699 } 33700 v.reset(OpAMD64MOVQconst) 33701 v.AuxInt = 0 33702 return true 33703 } 33704 // match: (SBBQcarrymask (FlagLT_ULT)) 33705 // cond: 33706 // result: (MOVQconst [-1]) 33707 for { 33708 v_0 := v.Args[0] 33709 if v_0.Op != OpAMD64FlagLT_ULT { 33710 break 33711 } 33712 v.reset(OpAMD64MOVQconst) 33713 v.AuxInt = -1 33714 return true 33715 } 33716 // match: (SBBQcarrymask (FlagLT_UGT)) 33717 // cond: 33718 // result: (MOVQconst [0]) 33719 for { 33720 v_0 := v.Args[0] 33721 if v_0.Op != OpAMD64FlagLT_UGT { 33722 break 33723 } 33724 v.reset(OpAMD64MOVQconst) 33725 v.AuxInt = 0 33726 return true 33727 } 33728 // match: (SBBQcarrymask (FlagGT_ULT)) 33729 // cond: 33730 // result: (MOVQconst [-1]) 33731 for { 33732 v_0 := v.Args[0] 33733 if v_0.Op != OpAMD64FlagGT_ULT { 33734 break 33735 } 33736 v.reset(OpAMD64MOVQconst) 33737 v.AuxInt = -1 33738 return true 33739 } 33740 // match: (SBBQcarrymask (FlagGT_UGT)) 33741 // cond: 33742 // result: (MOVQconst [0]) 33743 for { 33744 v_0 := v.Args[0] 33745 if v_0.Op != OpAMD64FlagGT_UGT { 33746 break 33747 } 33748 v.reset(OpAMD64MOVQconst) 33749 v.AuxInt = 0 33750 return true 33751 } 33752 return false 33753 } 33754 func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { 33755 // match: (SETA (InvertFlags x)) 33756 // cond: 33757 // result: (SETB x) 33758 for { 33759 v_0 := v.Args[0] 33760 if v_0.Op != OpAMD64InvertFlags { 33761 break 33762 } 33763 x := v_0.Args[0] 33764 v.reset(OpAMD64SETB) 33765 v.AddArg(x) 33766 return true 33767 } 33768 // match: (SETA (FlagEQ)) 33769 // cond: 33770 // result: (MOVLconst [0]) 33771 for { 33772 v_0 := v.Args[0] 33773 if v_0.Op != 
OpAMD64FlagEQ { 33774 break 33775 } 33776 v.reset(OpAMD64MOVLconst) 33777 v.AuxInt = 0 33778 return true 33779 } 33780 // match: (SETA (FlagLT_ULT)) 33781 // cond: 33782 // result: (MOVLconst [0]) 33783 for { 33784 v_0 := v.Args[0] 33785 if v_0.Op != OpAMD64FlagLT_ULT { 33786 break 33787 } 33788 v.reset(OpAMD64MOVLconst) 33789 v.AuxInt = 0 33790 return true 33791 } 33792 // match: (SETA (FlagLT_UGT)) 33793 // cond: 33794 // result: (MOVLconst [1]) 33795 for { 33796 v_0 := v.Args[0] 33797 if v_0.Op != OpAMD64FlagLT_UGT { 33798 break 33799 } 33800 v.reset(OpAMD64MOVLconst) 33801 v.AuxInt = 1 33802 return true 33803 } 33804 // match: (SETA (FlagGT_ULT)) 33805 // cond: 33806 // result: (MOVLconst [0]) 33807 for { 33808 v_0 := v.Args[0] 33809 if v_0.Op != OpAMD64FlagGT_ULT { 33810 break 33811 } 33812 v.reset(OpAMD64MOVLconst) 33813 v.AuxInt = 0 33814 return true 33815 } 33816 // match: (SETA (FlagGT_UGT)) 33817 // cond: 33818 // result: (MOVLconst [1]) 33819 for { 33820 v_0 := v.Args[0] 33821 if v_0.Op != OpAMD64FlagGT_UGT { 33822 break 33823 } 33824 v.reset(OpAMD64MOVLconst) 33825 v.AuxInt = 1 33826 return true 33827 } 33828 return false 33829 } 33830 func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { 33831 // match: (SETAE (InvertFlags x)) 33832 // cond: 33833 // result: (SETBE x) 33834 for { 33835 v_0 := v.Args[0] 33836 if v_0.Op != OpAMD64InvertFlags { 33837 break 33838 } 33839 x := v_0.Args[0] 33840 v.reset(OpAMD64SETBE) 33841 v.AddArg(x) 33842 return true 33843 } 33844 // match: (SETAE (FlagEQ)) 33845 // cond: 33846 // result: (MOVLconst [1]) 33847 for { 33848 v_0 := v.Args[0] 33849 if v_0.Op != OpAMD64FlagEQ { 33850 break 33851 } 33852 v.reset(OpAMD64MOVLconst) 33853 v.AuxInt = 1 33854 return true 33855 } 33856 // match: (SETAE (FlagLT_ULT)) 33857 // cond: 33858 // result: (MOVLconst [0]) 33859 for { 33860 v_0 := v.Args[0] 33861 if v_0.Op != OpAMD64FlagLT_ULT { 33862 break 33863 } 33864 v.reset(OpAMD64MOVLconst) 33865 v.AuxInt = 0 33866 return true 33867 } 33868 // match: (SETAE (FlagLT_UGT)) 33869 // cond: 33870 // result: (MOVLconst [1]) 33871 for { 33872 v_0 := v.Args[0] 33873 if v_0.Op != OpAMD64FlagLT_UGT { 33874 break 33875 } 33876 v.reset(OpAMD64MOVLconst) 33877 v.AuxInt = 1 33878 return true 33879 } 33880 // match: (SETAE (FlagGT_ULT)) 33881 // cond: 33882 // result: (MOVLconst [0]) 33883 for { 33884 v_0 := v.Args[0] 33885 if v_0.Op != OpAMD64FlagGT_ULT { 33886 break 33887 } 33888 v.reset(OpAMD64MOVLconst) 33889 v.AuxInt = 0 33890 return true 33891 } 33892 // match: (SETAE (FlagGT_UGT)) 33893 // cond: 33894 // result: (MOVLconst [1]) 33895 for { 33896 v_0 := v.Args[0] 33897 if v_0.Op != OpAMD64FlagGT_UGT { 33898 break 33899 } 33900 v.reset(OpAMD64MOVLconst) 33901 v.AuxInt = 1 33902 return true 33903 } 33904 return false 33905 } 33906 func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { 33907 // match: (SETB (InvertFlags x)) 33908 // cond: 33909 // result: (SETA x) 33910 for { 33911 v_0 := v.Args[0] 33912 if v_0.Op != OpAMD64InvertFlags { 33913 break 33914 } 33915 x := v_0.Args[0] 33916 v.reset(OpAMD64SETA) 33917 v.AddArg(x) 33918 return true 33919 } 33920 // match: (SETB (FlagEQ)) 33921 // cond: 33922 // result: (MOVLconst [0]) 33923 for { 33924 v_0 := v.Args[0] 33925 if v_0.Op != OpAMD64FlagEQ { 33926 break 33927 } 33928 v.reset(OpAMD64MOVLconst) 33929 v.AuxInt = 0 33930 return true 33931 } 33932 // match: (SETB (FlagLT_ULT)) 33933 // cond: 33934 // result: (MOVLconst [1]) 33935 for { 33936 v_0 := v.Args[0] 33937 if v_0.Op != OpAMD64FlagLT_ULT { 33938 break 33939 } 33940 
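// Editor's note: SBBLcarrymask/SBBQcarrymask materialize 0 or -1 from the
// carry flag (a branch-free unsigned-less-than mask), so with a known Flag*
// state they fold to a MOVLconst/MOVQconst of 0 or -1. The SETcc rules here
// fold the same way to 0 or 1, and under InvertFlags (swapped compare
// operands) mirror their condition: SETA <-> SETB, SETAE <-> SETBE. Sketch
// of the mask idiom, names mine:
//
//	func ltMask64(a, b uint64) uint64 {
//		if a < b {
//			return ^uint64(0) // the lowering produces this without a branch
//		}
//		return 0
//	}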
v.reset(OpAMD64MOVLconst) 33941 v.AuxInt = 1 33942 return true 33943 } 33944 // match: (SETB (FlagLT_UGT)) 33945 // cond: 33946 // result: (MOVLconst [0]) 33947 for { 33948 v_0 := v.Args[0] 33949 if v_0.Op != OpAMD64FlagLT_UGT { 33950 break 33951 } 33952 v.reset(OpAMD64MOVLconst) 33953 v.AuxInt = 0 33954 return true 33955 } 33956 // match: (SETB (FlagGT_ULT)) 33957 // cond: 33958 // result: (MOVLconst [1]) 33959 for { 33960 v_0 := v.Args[0] 33961 if v_0.Op != OpAMD64FlagGT_ULT { 33962 break 33963 } 33964 v.reset(OpAMD64MOVLconst) 33965 v.AuxInt = 1 33966 return true 33967 } 33968 // match: (SETB (FlagGT_UGT)) 33969 // cond: 33970 // result: (MOVLconst [0]) 33971 for { 33972 v_0 := v.Args[0] 33973 if v_0.Op != OpAMD64FlagGT_UGT { 33974 break 33975 } 33976 v.reset(OpAMD64MOVLconst) 33977 v.AuxInt = 0 33978 return true 33979 } 33980 return false 33981 } 33982 func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { 33983 // match: (SETBE (InvertFlags x)) 33984 // cond: 33985 // result: (SETAE x) 33986 for { 33987 v_0 := v.Args[0] 33988 if v_0.Op != OpAMD64InvertFlags { 33989 break 33990 } 33991 x := v_0.Args[0] 33992 v.reset(OpAMD64SETAE) 33993 v.AddArg(x) 33994 return true 33995 } 33996 // match: (SETBE (FlagEQ)) 33997 // cond: 33998 // result: (MOVLconst [1]) 33999 for { 34000 v_0 := v.Args[0] 34001 if v_0.Op != OpAMD64FlagEQ { 34002 break 34003 } 34004 v.reset(OpAMD64MOVLconst) 34005 v.AuxInt = 1 34006 return true 34007 } 34008 // match: (SETBE (FlagLT_ULT)) 34009 // cond: 34010 // result: (MOVLconst [1]) 34011 for { 34012 v_0 := v.Args[0] 34013 if v_0.Op != OpAMD64FlagLT_ULT { 34014 break 34015 } 34016 v.reset(OpAMD64MOVLconst) 34017 v.AuxInt = 1 34018 return true 34019 } 34020 // match: (SETBE (FlagLT_UGT)) 34021 // cond: 34022 // result: (MOVLconst [0]) 34023 for { 34024 v_0 := v.Args[0] 34025 if v_0.Op != OpAMD64FlagLT_UGT { 34026 break 34027 } 34028 v.reset(OpAMD64MOVLconst) 34029 v.AuxInt = 0 34030 return true 34031 } 34032 // match: (SETBE (FlagGT_ULT)) 34033 // cond: 34034 // result: (MOVLconst [1]) 34035 for { 34036 v_0 := v.Args[0] 34037 if v_0.Op != OpAMD64FlagGT_ULT { 34038 break 34039 } 34040 v.reset(OpAMD64MOVLconst) 34041 v.AuxInt = 1 34042 return true 34043 } 34044 // match: (SETBE (FlagGT_UGT)) 34045 // cond: 34046 // result: (MOVLconst [0]) 34047 for { 34048 v_0 := v.Args[0] 34049 if v_0.Op != OpAMD64FlagGT_UGT { 34050 break 34051 } 34052 v.reset(OpAMD64MOVLconst) 34053 v.AuxInt = 0 34054 return true 34055 } 34056 return false 34057 } 34058 func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { 34059 b := v.Block 34060 _ = b 34061 config := b.Func.Config 34062 _ = config 34063 // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y)) 34064 // cond: !config.nacl 34065 // result: (SETAE (BTL x y)) 34066 for { 34067 v_0 := v.Args[0] 34068 if v_0.Op != OpAMD64TESTL { 34069 break 34070 } 34071 _ = v_0.Args[1] 34072 v_0_0 := v_0.Args[0] 34073 if v_0_0.Op != OpAMD64SHLL { 34074 break 34075 } 34076 _ = v_0_0.Args[1] 34077 v_0_0_0 := v_0_0.Args[0] 34078 if v_0_0_0.Op != OpAMD64MOVLconst { 34079 break 34080 } 34081 if v_0_0_0.AuxInt != 1 { 34082 break 34083 } 34084 x := v_0_0.Args[1] 34085 y := v_0.Args[1] 34086 if !(!config.nacl) { 34087 break 34088 } 34089 v.reset(OpAMD64SETAE) 34090 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 34091 v0.AddArg(x) 34092 v0.AddArg(y) 34093 v.AddArg(v0) 34094 return true 34095 } 34096 // match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x))) 34097 // cond: !config.nacl 34098 // result: (SETAE (BTL x y)) 34099 for { 34100 v_0 := v.Args[0] 34101 if 
v_0.Op != OpAMD64TESTL { 34102 break 34103 } 34104 _ = v_0.Args[1] 34105 y := v_0.Args[0] 34106 v_0_1 := v_0.Args[1] 34107 if v_0_1.Op != OpAMD64SHLL { 34108 break 34109 } 34110 _ = v_0_1.Args[1] 34111 v_0_1_0 := v_0_1.Args[0] 34112 if v_0_1_0.Op != OpAMD64MOVLconst { 34113 break 34114 } 34115 if v_0_1_0.AuxInt != 1 { 34116 break 34117 } 34118 x := v_0_1.Args[1] 34119 if !(!config.nacl) { 34120 break 34121 } 34122 v.reset(OpAMD64SETAE) 34123 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 34124 v0.AddArg(x) 34125 v0.AddArg(y) 34126 v.AddArg(v0) 34127 return true 34128 } 34129 // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 34130 // cond: !config.nacl 34131 // result: (SETAE (BTQ x y)) 34132 for { 34133 v_0 := v.Args[0] 34134 if v_0.Op != OpAMD64TESTQ { 34135 break 34136 } 34137 _ = v_0.Args[1] 34138 v_0_0 := v_0.Args[0] 34139 if v_0_0.Op != OpAMD64SHLQ { 34140 break 34141 } 34142 _ = v_0_0.Args[1] 34143 v_0_0_0 := v_0_0.Args[0] 34144 if v_0_0_0.Op != OpAMD64MOVQconst { 34145 break 34146 } 34147 if v_0_0_0.AuxInt != 1 { 34148 break 34149 } 34150 x := v_0_0.Args[1] 34151 y := v_0.Args[1] 34152 if !(!config.nacl) { 34153 break 34154 } 34155 v.reset(OpAMD64SETAE) 34156 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 34157 v0.AddArg(x) 34158 v0.AddArg(y) 34159 v.AddArg(v0) 34160 return true 34161 } 34162 // match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 34163 // cond: !config.nacl 34164 // result: (SETAE (BTQ x y)) 34165 for { 34166 v_0 := v.Args[0] 34167 if v_0.Op != OpAMD64TESTQ { 34168 break 34169 } 34170 _ = v_0.Args[1] 34171 y := v_0.Args[0] 34172 v_0_1 := v_0.Args[1] 34173 if v_0_1.Op != OpAMD64SHLQ { 34174 break 34175 } 34176 _ = v_0_1.Args[1] 34177 v_0_1_0 := v_0_1.Args[0] 34178 if v_0_1_0.Op != OpAMD64MOVQconst { 34179 break 34180 } 34181 if v_0_1_0.AuxInt != 1 { 34182 break 34183 } 34184 x := v_0_1.Args[1] 34185 if !(!config.nacl) { 34186 break 34187 } 34188 v.reset(OpAMD64SETAE) 34189 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 34190 v0.AddArg(x) 34191 v0.AddArg(y) 34192 v.AddArg(v0) 34193 return true 34194 } 34195 // match: (SETEQ (TESTLconst [c] x)) 34196 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 34197 // result: (SETAE (BTLconst [log2(c)] x)) 34198 for { 34199 v_0 := v.Args[0] 34200 if v_0.Op != OpAMD64TESTLconst { 34201 break 34202 } 34203 c := v_0.AuxInt 34204 x := v_0.Args[0] 34205 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 34206 break 34207 } 34208 v.reset(OpAMD64SETAE) 34209 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 34210 v0.AuxInt = log2(c) 34211 v0.AddArg(x) 34212 v.AddArg(v0) 34213 return true 34214 } 34215 // match: (SETEQ (TESTQconst [c] x)) 34216 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 34217 // result: (SETAE (BTQconst [log2(c)] x)) 34218 for { 34219 v_0 := v.Args[0] 34220 if v_0.Op != OpAMD64TESTQconst { 34221 break 34222 } 34223 c := v_0.AuxInt 34224 x := v_0.Args[0] 34225 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 34226 break 34227 } 34228 v.reset(OpAMD64SETAE) 34229 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 34230 v0.AuxInt = log2(c) 34231 v0.AddArg(x) 34232 v.AddArg(v0) 34233 return true 34234 } 34235 // match: (SETEQ (TESTQ (MOVQconst [c]) x)) 34236 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 34237 // result: (SETAE (BTQconst [log2(c)] x)) 34238 for { 34239 v_0 := v.Args[0] 34240 if v_0.Op != OpAMD64TESTQ { 34241 break 34242 } 34243 _ = v_0.Args[1] 34244 v_0_0 := v_0.Args[0] 34245 if v_0_0.Op != OpAMD64MOVQconst { 34246 break 34247 
} 34248 c := v_0_0.AuxInt 34249 x := v_0.Args[1] 34250 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 34251 break 34252 } 34253 v.reset(OpAMD64SETAE) 34254 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 34255 v0.AuxInt = log2(c) 34256 v0.AddArg(x) 34257 v.AddArg(v0) 34258 return true 34259 } 34260 // match: (SETEQ (TESTQ x (MOVQconst [c]))) 34261 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 34262 // result: (SETAE (BTQconst [log2(c)] x)) 34263 for { 34264 v_0 := v.Args[0] 34265 if v_0.Op != OpAMD64TESTQ { 34266 break 34267 } 34268 _ = v_0.Args[1] 34269 x := v_0.Args[0] 34270 v_0_1 := v_0.Args[1] 34271 if v_0_1.Op != OpAMD64MOVQconst { 34272 break 34273 } 34274 c := v_0_1.AuxInt 34275 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 34276 break 34277 } 34278 v.reset(OpAMD64SETAE) 34279 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 34280 v0.AuxInt = log2(c) 34281 v0.AddArg(x) 34282 v.AddArg(v0) 34283 return true 34284 } 34285 // match: (SETEQ (InvertFlags x)) 34286 // cond: 34287 // result: (SETEQ x) 34288 for { 34289 v_0 := v.Args[0] 34290 if v_0.Op != OpAMD64InvertFlags { 34291 break 34292 } 34293 x := v_0.Args[0] 34294 v.reset(OpAMD64SETEQ) 34295 v.AddArg(x) 34296 return true 34297 } 34298 // match: (SETEQ (FlagEQ)) 34299 // cond: 34300 // result: (MOVLconst [1]) 34301 for { 34302 v_0 := v.Args[0] 34303 if v_0.Op != OpAMD64FlagEQ { 34304 break 34305 } 34306 v.reset(OpAMD64MOVLconst) 34307 v.AuxInt = 1 34308 return true 34309 } 34310 return false 34311 } 34312 func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { 34313 // match: (SETEQ (FlagLT_ULT)) 34314 // cond: 34315 // result: (MOVLconst [0]) 34316 for { 34317 v_0 := v.Args[0] 34318 if v_0.Op != OpAMD64FlagLT_ULT { 34319 break 34320 } 34321 v.reset(OpAMD64MOVLconst) 34322 v.AuxInt = 0 34323 return true 34324 } 34325 // match: (SETEQ (FlagLT_UGT)) 34326 // cond: 34327 // result: (MOVLconst [0]) 34328 for { 34329 v_0 := v.Args[0] 34330 if v_0.Op != OpAMD64FlagLT_UGT { 34331 break 34332 } 34333 v.reset(OpAMD64MOVLconst) 34334 v.AuxInt = 0 34335 return true 34336 } 34337 // match: (SETEQ (FlagGT_ULT)) 34338 // cond: 34339 // result: (MOVLconst [0]) 34340 for { 34341 v_0 := v.Args[0] 34342 if v_0.Op != OpAMD64FlagGT_ULT { 34343 break 34344 } 34345 v.reset(OpAMD64MOVLconst) 34346 v.AuxInt = 0 34347 return true 34348 } 34349 // match: (SETEQ (FlagGT_UGT)) 34350 // cond: 34351 // result: (MOVLconst [0]) 34352 for { 34353 v_0 := v.Args[0] 34354 if v_0.Op != OpAMD64FlagGT_UGT { 34355 break 34356 } 34357 v.reset(OpAMD64MOVLconst) 34358 v.AuxInt = 0 34359 return true 34360 } 34361 return false 34362 } 34363 func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { 34364 // match: (SETG (InvertFlags x)) 34365 // cond: 34366 // result: (SETL x) 34367 for { 34368 v_0 := v.Args[0] 34369 if v_0.Op != OpAMD64InvertFlags { 34370 break 34371 } 34372 x := v_0.Args[0] 34373 v.reset(OpAMD64SETL) 34374 v.AddArg(x) 34375 return true 34376 } 34377 // match: (SETG (FlagEQ)) 34378 // cond: 34379 // result: (MOVLconst [0]) 34380 for { 34381 v_0 := v.Args[0] 34382 if v_0.Op != OpAMD64FlagEQ { 34383 break 34384 } 34385 v.reset(OpAMD64MOVLconst) 34386 v.AuxInt = 0 34387 return true 34388 } 34389 // match: (SETG (FlagLT_ULT)) 34390 // cond: 34391 // result: (MOVLconst [0]) 34392 for { 34393 v_0 := v.Args[0] 34394 if v_0.Op != OpAMD64FlagLT_ULT { 34395 break 34396 } 34397 v.reset(OpAMD64MOVLconst) 34398 v.AuxInt = 0 34399 return true 34400 } 34401 // match: (SETG (FlagLT_UGT)) 34402 // cond: 34403 // result: (MOVLconst 
[0]) 34404 for { 34405 v_0 := v.Args[0] 34406 if v_0.Op != OpAMD64FlagLT_UGT { 34407 break 34408 } 34409 v.reset(OpAMD64MOVLconst) 34410 v.AuxInt = 0 34411 return true 34412 } 34413 // match: (SETG (FlagGT_ULT)) 34414 // cond: 34415 // result: (MOVLconst [1]) 34416 for { 34417 v_0 := v.Args[0] 34418 if v_0.Op != OpAMD64FlagGT_ULT { 34419 break 34420 } 34421 v.reset(OpAMD64MOVLconst) 34422 v.AuxInt = 1 34423 return true 34424 } 34425 // match: (SETG (FlagGT_UGT)) 34426 // cond: 34427 // result: (MOVLconst [1]) 34428 for { 34429 v_0 := v.Args[0] 34430 if v_0.Op != OpAMD64FlagGT_UGT { 34431 break 34432 } 34433 v.reset(OpAMD64MOVLconst) 34434 v.AuxInt = 1 34435 return true 34436 } 34437 return false 34438 } 34439 func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { 34440 // match: (SETGE (InvertFlags x)) 34441 // cond: 34442 // result: (SETLE x) 34443 for { 34444 v_0 := v.Args[0] 34445 if v_0.Op != OpAMD64InvertFlags { 34446 break 34447 } 34448 x := v_0.Args[0] 34449 v.reset(OpAMD64SETLE) 34450 v.AddArg(x) 34451 return true 34452 } 34453 // match: (SETGE (FlagEQ)) 34454 // cond: 34455 // result: (MOVLconst [1]) 34456 for { 34457 v_0 := v.Args[0] 34458 if v_0.Op != OpAMD64FlagEQ { 34459 break 34460 } 34461 v.reset(OpAMD64MOVLconst) 34462 v.AuxInt = 1 34463 return true 34464 } 34465 // match: (SETGE (FlagLT_ULT)) 34466 // cond: 34467 // result: (MOVLconst [0]) 34468 for { 34469 v_0 := v.Args[0] 34470 if v_0.Op != OpAMD64FlagLT_ULT { 34471 break 34472 } 34473 v.reset(OpAMD64MOVLconst) 34474 v.AuxInt = 0 34475 return true 34476 } 34477 // match: (SETGE (FlagLT_UGT)) 34478 // cond: 34479 // result: (MOVLconst [0]) 34480 for { 34481 v_0 := v.Args[0] 34482 if v_0.Op != OpAMD64FlagLT_UGT { 34483 break 34484 } 34485 v.reset(OpAMD64MOVLconst) 34486 v.AuxInt = 0 34487 return true 34488 } 34489 // match: (SETGE (FlagGT_ULT)) 34490 // cond: 34491 // result: (MOVLconst [1]) 34492 for { 34493 v_0 := v.Args[0] 34494 if v_0.Op != OpAMD64FlagGT_ULT { 34495 break 34496 } 34497 v.reset(OpAMD64MOVLconst) 34498 v.AuxInt = 1 34499 return true 34500 } 34501 // match: (SETGE (FlagGT_UGT)) 34502 // cond: 34503 // result: (MOVLconst [1]) 34504 for { 34505 v_0 := v.Args[0] 34506 if v_0.Op != OpAMD64FlagGT_UGT { 34507 break 34508 } 34509 v.reset(OpAMD64MOVLconst) 34510 v.AuxInt = 1 34511 return true 34512 } 34513 return false 34514 } 34515 func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { 34516 // match: (SETL (InvertFlags x)) 34517 // cond: 34518 // result: (SETG x) 34519 for { 34520 v_0 := v.Args[0] 34521 if v_0.Op != OpAMD64InvertFlags { 34522 break 34523 } 34524 x := v_0.Args[0] 34525 v.reset(OpAMD64SETG) 34526 v.AddArg(x) 34527 return true 34528 } 34529 // match: (SETL (FlagEQ)) 34530 // cond: 34531 // result: (MOVLconst [0]) 34532 for { 34533 v_0 := v.Args[0] 34534 if v_0.Op != OpAMD64FlagEQ { 34535 break 34536 } 34537 v.reset(OpAMD64MOVLconst) 34538 v.AuxInt = 0 34539 return true 34540 } 34541 // match: (SETL (FlagLT_ULT)) 34542 // cond: 34543 // result: (MOVLconst [1]) 34544 for { 34545 v_0 := v.Args[0] 34546 if v_0.Op != OpAMD64FlagLT_ULT { 34547 break 34548 } 34549 v.reset(OpAMD64MOVLconst) 34550 v.AuxInt = 1 34551 return true 34552 } 34553 // match: (SETL (FlagLT_UGT)) 34554 // cond: 34555 // result: (MOVLconst [1]) 34556 for { 34557 v_0 := v.Args[0] 34558 if v_0.Op != OpAMD64FlagLT_UGT { 34559 break 34560 } 34561 v.reset(OpAMD64MOVLconst) 34562 v.AuxInt = 1 34563 return true 34564 } 34565 // match: (SETL (FlagGT_ULT)) 34566 // cond: 34567 // result: (MOVLconst [0]) 34568 for { 34569 v_0 := v.Args[0] 
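// Editor's note: the heaviest SETcc rewrites (SETEQ above, SETNE below) turn
// single-bit tests into BT when not targeting nacl: TEST of a SHL-by-one
// mask becomes BTL/BTQ, which leaves the selected bit in the carry flag, so
// SETEQ becomes SETAE (CF==0) and SETNE becomes SETB (CF==1); a power-of-two
// TEST constant uses BTxconst [log2(c)]. A hedged sketch of a qualifying
// source shape:
//
//	func bitSet(y uint64, x uint) bool {
//		return y&(1<<x) != 0 // lowered via BTQ x, y then SETB
//	}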
34570 if v_0.Op != OpAMD64FlagGT_ULT { 34571 break 34572 } 34573 v.reset(OpAMD64MOVLconst) 34574 v.AuxInt = 0 34575 return true 34576 } 34577 // match: (SETL (FlagGT_UGT)) 34578 // cond: 34579 // result: (MOVLconst [0]) 34580 for { 34581 v_0 := v.Args[0] 34582 if v_0.Op != OpAMD64FlagGT_UGT { 34583 break 34584 } 34585 v.reset(OpAMD64MOVLconst) 34586 v.AuxInt = 0 34587 return true 34588 } 34589 return false 34590 } 34591 func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { 34592 // match: (SETLE (InvertFlags x)) 34593 // cond: 34594 // result: (SETGE x) 34595 for { 34596 v_0 := v.Args[0] 34597 if v_0.Op != OpAMD64InvertFlags { 34598 break 34599 } 34600 x := v_0.Args[0] 34601 v.reset(OpAMD64SETGE) 34602 v.AddArg(x) 34603 return true 34604 } 34605 // match: (SETLE (FlagEQ)) 34606 // cond: 34607 // result: (MOVLconst [1]) 34608 for { 34609 v_0 := v.Args[0] 34610 if v_0.Op != OpAMD64FlagEQ { 34611 break 34612 } 34613 v.reset(OpAMD64MOVLconst) 34614 v.AuxInt = 1 34615 return true 34616 } 34617 // match: (SETLE (FlagLT_ULT)) 34618 // cond: 34619 // result: (MOVLconst [1]) 34620 for { 34621 v_0 := v.Args[0] 34622 if v_0.Op != OpAMD64FlagLT_ULT { 34623 break 34624 } 34625 v.reset(OpAMD64MOVLconst) 34626 v.AuxInt = 1 34627 return true 34628 } 34629 // match: (SETLE (FlagLT_UGT)) 34630 // cond: 34631 // result: (MOVLconst [1]) 34632 for { 34633 v_0 := v.Args[0] 34634 if v_0.Op != OpAMD64FlagLT_UGT { 34635 break 34636 } 34637 v.reset(OpAMD64MOVLconst) 34638 v.AuxInt = 1 34639 return true 34640 } 34641 // match: (SETLE (FlagGT_ULT)) 34642 // cond: 34643 // result: (MOVLconst [0]) 34644 for { 34645 v_0 := v.Args[0] 34646 if v_0.Op != OpAMD64FlagGT_ULT { 34647 break 34648 } 34649 v.reset(OpAMD64MOVLconst) 34650 v.AuxInt = 0 34651 return true 34652 } 34653 // match: (SETLE (FlagGT_UGT)) 34654 // cond: 34655 // result: (MOVLconst [0]) 34656 for { 34657 v_0 := v.Args[0] 34658 if v_0.Op != OpAMD64FlagGT_UGT { 34659 break 34660 } 34661 v.reset(OpAMD64MOVLconst) 34662 v.AuxInt = 0 34663 return true 34664 } 34665 return false 34666 } 34667 func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { 34668 b := v.Block 34669 _ = b 34670 config := b.Func.Config 34671 _ = config 34672 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 34673 // cond: !config.nacl 34674 // result: (SETB (BTL x y)) 34675 for { 34676 v_0 := v.Args[0] 34677 if v_0.Op != OpAMD64TESTL { 34678 break 34679 } 34680 _ = v_0.Args[1] 34681 v_0_0 := v_0.Args[0] 34682 if v_0_0.Op != OpAMD64SHLL { 34683 break 34684 } 34685 _ = v_0_0.Args[1] 34686 v_0_0_0 := v_0_0.Args[0] 34687 if v_0_0_0.Op != OpAMD64MOVLconst { 34688 break 34689 } 34690 if v_0_0_0.AuxInt != 1 { 34691 break 34692 } 34693 x := v_0_0.Args[1] 34694 y := v_0.Args[1] 34695 if !(!config.nacl) { 34696 break 34697 } 34698 v.reset(OpAMD64SETB) 34699 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 34700 v0.AddArg(x) 34701 v0.AddArg(y) 34702 v.AddArg(v0) 34703 return true 34704 } 34705 // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) 34706 // cond: !config.nacl 34707 // result: (SETB (BTL x y)) 34708 for { 34709 v_0 := v.Args[0] 34710 if v_0.Op != OpAMD64TESTL { 34711 break 34712 } 34713 _ = v_0.Args[1] 34714 y := v_0.Args[0] 34715 v_0_1 := v_0.Args[1] 34716 if v_0_1.Op != OpAMD64SHLL { 34717 break 34718 } 34719 _ = v_0_1.Args[1] 34720 v_0_1_0 := v_0_1.Args[0] 34721 if v_0_1_0.Op != OpAMD64MOVLconst { 34722 break 34723 } 34724 if v_0_1_0.AuxInt != 1 { 34725 break 34726 } 34727 x := v_0_1.Args[1] 34728 if !(!config.nacl) { 34729 break 34730 } 34731 v.reset(OpAMD64SETB) 34732 v0 := 
b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 34733 v0.AddArg(x) 34734 v0.AddArg(y) 34735 v.AddArg(v0) 34736 return true 34737 } 34738 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 34739 // cond: !config.nacl 34740 // result: (SETB (BTQ x y)) 34741 for { 34742 v_0 := v.Args[0] 34743 if v_0.Op != OpAMD64TESTQ { 34744 break 34745 } 34746 _ = v_0.Args[1] 34747 v_0_0 := v_0.Args[0] 34748 if v_0_0.Op != OpAMD64SHLQ { 34749 break 34750 } 34751 _ = v_0_0.Args[1] 34752 v_0_0_0 := v_0_0.Args[0] 34753 if v_0_0_0.Op != OpAMD64MOVQconst { 34754 break 34755 } 34756 if v_0_0_0.AuxInt != 1 { 34757 break 34758 } 34759 x := v_0_0.Args[1] 34760 y := v_0.Args[1] 34761 if !(!config.nacl) { 34762 break 34763 } 34764 v.reset(OpAMD64SETB) 34765 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 34766 v0.AddArg(x) 34767 v0.AddArg(y) 34768 v.AddArg(v0) 34769 return true 34770 } 34771 // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) 34772 // cond: !config.nacl 34773 // result: (SETB (BTQ x y)) 34774 for { 34775 v_0 := v.Args[0] 34776 if v_0.Op != OpAMD64TESTQ { 34777 break 34778 } 34779 _ = v_0.Args[1] 34780 y := v_0.Args[0] 34781 v_0_1 := v_0.Args[1] 34782 if v_0_1.Op != OpAMD64SHLQ { 34783 break 34784 } 34785 _ = v_0_1.Args[1] 34786 v_0_1_0 := v_0_1.Args[0] 34787 if v_0_1_0.Op != OpAMD64MOVQconst { 34788 break 34789 } 34790 if v_0_1_0.AuxInt != 1 { 34791 break 34792 } 34793 x := v_0_1.Args[1] 34794 if !(!config.nacl) { 34795 break 34796 } 34797 v.reset(OpAMD64SETB) 34798 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 34799 v0.AddArg(x) 34800 v0.AddArg(y) 34801 v.AddArg(v0) 34802 return true 34803 } 34804 // match: (SETNE (TESTLconst [c] x)) 34805 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 34806 // result: (SETB (BTLconst [log2(c)] x)) 34807 for { 34808 v_0 := v.Args[0] 34809 if v_0.Op != OpAMD64TESTLconst { 34810 break 34811 } 34812 c := v_0.AuxInt 34813 x := v_0.Args[0] 34814 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 34815 break 34816 } 34817 v.reset(OpAMD64SETB) 34818 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 34819 v0.AuxInt = log2(c) 34820 v0.AddArg(x) 34821 v.AddArg(v0) 34822 return true 34823 } 34824 // match: (SETNE (TESTQconst [c] x)) 34825 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 34826 // result: (SETB (BTQconst [log2(c)] x)) 34827 for { 34828 v_0 := v.Args[0] 34829 if v_0.Op != OpAMD64TESTQconst { 34830 break 34831 } 34832 c := v_0.AuxInt 34833 x := v_0.Args[0] 34834 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 34835 break 34836 } 34837 v.reset(OpAMD64SETB) 34838 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 34839 v0.AuxInt = log2(c) 34840 v0.AddArg(x) 34841 v.AddArg(v0) 34842 return true 34843 } 34844 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 34845 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 34846 // result: (SETB (BTQconst [log2(c)] x)) 34847 for { 34848 v_0 := v.Args[0] 34849 if v_0.Op != OpAMD64TESTQ { 34850 break 34851 } 34852 _ = v_0.Args[1] 34853 v_0_0 := v_0.Args[0] 34854 if v_0_0.Op != OpAMD64MOVQconst { 34855 break 34856 } 34857 c := v_0_0.AuxInt 34858 x := v_0.Args[1] 34859 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 34860 break 34861 } 34862 v.reset(OpAMD64SETB) 34863 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 34864 v0.AuxInt = log2(c) 34865 v0.AddArg(x) 34866 v.AddArg(v0) 34867 return true 34868 } 34869 // match: (SETNE (TESTQ x (MOVQconst [c]))) 34870 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 34871 // result: (SETB (BTQconst 
[log2(c)] x)) 34872 for { 34873 v_0 := v.Args[0] 34874 if v_0.Op != OpAMD64TESTQ { 34875 break 34876 } 34877 _ = v_0.Args[1] 34878 x := v_0.Args[0] 34879 v_0_1 := v_0.Args[1] 34880 if v_0_1.Op != OpAMD64MOVQconst { 34881 break 34882 } 34883 c := v_0_1.AuxInt 34884 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 34885 break 34886 } 34887 v.reset(OpAMD64SETB) 34888 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 34889 v0.AuxInt = log2(c) 34890 v0.AddArg(x) 34891 v.AddArg(v0) 34892 return true 34893 } 34894 // match: (SETNE (InvertFlags x)) 34895 // cond: 34896 // result: (SETNE x) 34897 for { 34898 v_0 := v.Args[0] 34899 if v_0.Op != OpAMD64InvertFlags { 34900 break 34901 } 34902 x := v_0.Args[0] 34903 v.reset(OpAMD64SETNE) 34904 v.AddArg(x) 34905 return true 34906 } 34907 // match: (SETNE (FlagEQ)) 34908 // cond: 34909 // result: (MOVLconst [0]) 34910 for { 34911 v_0 := v.Args[0] 34912 if v_0.Op != OpAMD64FlagEQ { 34913 break 34914 } 34915 v.reset(OpAMD64MOVLconst) 34916 v.AuxInt = 0 34917 return true 34918 } 34919 return false 34920 } 34921 func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { 34922 // match: (SETNE (FlagLT_ULT)) 34923 // cond: 34924 // result: (MOVLconst [1]) 34925 for { 34926 v_0 := v.Args[0] 34927 if v_0.Op != OpAMD64FlagLT_ULT { 34928 break 34929 } 34930 v.reset(OpAMD64MOVLconst) 34931 v.AuxInt = 1 34932 return true 34933 } 34934 // match: (SETNE (FlagLT_UGT)) 34935 // cond: 34936 // result: (MOVLconst [1]) 34937 for { 34938 v_0 := v.Args[0] 34939 if v_0.Op != OpAMD64FlagLT_UGT { 34940 break 34941 } 34942 v.reset(OpAMD64MOVLconst) 34943 v.AuxInt = 1 34944 return true 34945 } 34946 // match: (SETNE (FlagGT_ULT)) 34947 // cond: 34948 // result: (MOVLconst [1]) 34949 for { 34950 v_0 := v.Args[0] 34951 if v_0.Op != OpAMD64FlagGT_ULT { 34952 break 34953 } 34954 v.reset(OpAMD64MOVLconst) 34955 v.AuxInt = 1 34956 return true 34957 } 34958 // match: (SETNE (FlagGT_UGT)) 34959 // cond: 34960 // result: (MOVLconst [1]) 34961 for { 34962 v_0 := v.Args[0] 34963 if v_0.Op != OpAMD64FlagGT_UGT { 34964 break 34965 } 34966 v.reset(OpAMD64MOVLconst) 34967 v.AuxInt = 1 34968 return true 34969 } 34970 return false 34971 } 34972 func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { 34973 b := v.Block 34974 _ = b 34975 // match: (SHLL x (MOVQconst [c])) 34976 // cond: 34977 // result: (SHLLconst [c&31] x) 34978 for { 34979 _ = v.Args[1] 34980 x := v.Args[0] 34981 v_1 := v.Args[1] 34982 if v_1.Op != OpAMD64MOVQconst { 34983 break 34984 } 34985 c := v_1.AuxInt 34986 v.reset(OpAMD64SHLLconst) 34987 v.AuxInt = c & 31 34988 v.AddArg(x) 34989 return true 34990 } 34991 // match: (SHLL x (MOVLconst [c])) 34992 // cond: 34993 // result: (SHLLconst [c&31] x) 34994 for { 34995 _ = v.Args[1] 34996 x := v.Args[0] 34997 v_1 := v.Args[1] 34998 if v_1.Op != OpAMD64MOVLconst { 34999 break 35000 } 35001 c := v_1.AuxInt 35002 v.reset(OpAMD64SHLLconst) 35003 v.AuxInt = c & 31 35004 v.AddArg(x) 35005 return true 35006 } 35007 // match: (SHLL x (ADDQconst [c] y)) 35008 // cond: c & 31 == 0 35009 // result: (SHLL x y) 35010 for { 35011 _ = v.Args[1] 35012 x := v.Args[0] 35013 v_1 := v.Args[1] 35014 if v_1.Op != OpAMD64ADDQconst { 35015 break 35016 } 35017 c := v_1.AuxInt 35018 y := v_1.Args[0] 35019 if !(c&31 == 0) { 35020 break 35021 } 35022 v.reset(OpAMD64SHLL) 35023 v.AddArg(x) 35024 v.AddArg(y) 35025 return true 35026 } 35027 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y))) 35028 // cond: c & 31 == 0 35029 // result: (SHLL x (NEGQ <t> y)) 35030 for { 35031 _ = v.Args[1] 35032 x := 
v.Args[0] 35033 v_1 := v.Args[1] 35034 if v_1.Op != OpAMD64NEGQ { 35035 break 35036 } 35037 t := v_1.Type 35038 v_1_0 := v_1.Args[0] 35039 if v_1_0.Op != OpAMD64ADDQconst { 35040 break 35041 } 35042 c := v_1_0.AuxInt 35043 y := v_1_0.Args[0] 35044 if !(c&31 == 0) { 35045 break 35046 } 35047 v.reset(OpAMD64SHLL) 35048 v.AddArg(x) 35049 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35050 v0.AddArg(y) 35051 v.AddArg(v0) 35052 return true 35053 } 35054 // match: (SHLL x (ANDQconst [c] y)) 35055 // cond: c & 31 == 31 35056 // result: (SHLL x y) 35057 for { 35058 _ = v.Args[1] 35059 x := v.Args[0] 35060 v_1 := v.Args[1] 35061 if v_1.Op != OpAMD64ANDQconst { 35062 break 35063 } 35064 c := v_1.AuxInt 35065 y := v_1.Args[0] 35066 if !(c&31 == 31) { 35067 break 35068 } 35069 v.reset(OpAMD64SHLL) 35070 v.AddArg(x) 35071 v.AddArg(y) 35072 return true 35073 } 35074 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y))) 35075 // cond: c & 31 == 31 35076 // result: (SHLL x (NEGQ <t> y)) 35077 for { 35078 _ = v.Args[1] 35079 x := v.Args[0] 35080 v_1 := v.Args[1] 35081 if v_1.Op != OpAMD64NEGQ { 35082 break 35083 } 35084 t := v_1.Type 35085 v_1_0 := v_1.Args[0] 35086 if v_1_0.Op != OpAMD64ANDQconst { 35087 break 35088 } 35089 c := v_1_0.AuxInt 35090 y := v_1_0.Args[0] 35091 if !(c&31 == 31) { 35092 break 35093 } 35094 v.reset(OpAMD64SHLL) 35095 v.AddArg(x) 35096 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35097 v0.AddArg(y) 35098 v.AddArg(v0) 35099 return true 35100 } 35101 // match: (SHLL x (ADDLconst [c] y)) 35102 // cond: c & 31 == 0 35103 // result: (SHLL x y) 35104 for { 35105 _ = v.Args[1] 35106 x := v.Args[0] 35107 v_1 := v.Args[1] 35108 if v_1.Op != OpAMD64ADDLconst { 35109 break 35110 } 35111 c := v_1.AuxInt 35112 y := v_1.Args[0] 35113 if !(c&31 == 0) { 35114 break 35115 } 35116 v.reset(OpAMD64SHLL) 35117 v.AddArg(x) 35118 v.AddArg(y) 35119 return true 35120 } 35121 // match: (SHLL x (NEGL <t> (ADDLconst [c] y))) 35122 // cond: c & 31 == 0 35123 // result: (SHLL x (NEGL <t> y)) 35124 for { 35125 _ = v.Args[1] 35126 x := v.Args[0] 35127 v_1 := v.Args[1] 35128 if v_1.Op != OpAMD64NEGL { 35129 break 35130 } 35131 t := v_1.Type 35132 v_1_0 := v_1.Args[0] 35133 if v_1_0.Op != OpAMD64ADDLconst { 35134 break 35135 } 35136 c := v_1_0.AuxInt 35137 y := v_1_0.Args[0] 35138 if !(c&31 == 0) { 35139 break 35140 } 35141 v.reset(OpAMD64SHLL) 35142 v.AddArg(x) 35143 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35144 v0.AddArg(y) 35145 v.AddArg(v0) 35146 return true 35147 } 35148 // match: (SHLL x (ANDLconst [c] y)) 35149 // cond: c & 31 == 31 35150 // result: (SHLL x y) 35151 for { 35152 _ = v.Args[1] 35153 x := v.Args[0] 35154 v_1 := v.Args[1] 35155 if v_1.Op != OpAMD64ANDLconst { 35156 break 35157 } 35158 c := v_1.AuxInt 35159 y := v_1.Args[0] 35160 if !(c&31 == 31) { 35161 break 35162 } 35163 v.reset(OpAMD64SHLL) 35164 v.AddArg(x) 35165 v.AddArg(y) 35166 return true 35167 } 35168 // match: (SHLL x (NEGL <t> (ANDLconst [c] y))) 35169 // cond: c & 31 == 31 35170 // result: (SHLL x (NEGL <t> y)) 35171 for { 35172 _ = v.Args[1] 35173 x := v.Args[0] 35174 v_1 := v.Args[1] 35175 if v_1.Op != OpAMD64NEGL { 35176 break 35177 } 35178 t := v_1.Type 35179 v_1_0 := v_1.Args[0] 35180 if v_1_0.Op != OpAMD64ANDLconst { 35181 break 35182 } 35183 c := v_1_0.AuxInt 35184 y := v_1_0.Args[0] 35185 if !(c&31 == 31) { 35186 break 35187 } 35188 v.reset(OpAMD64SHLL) 35189 v.AddArg(x) 35190 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35191 v0.AddArg(y) 35192 v.AddArg(v0) 35193 return true 35194 } 35195 return false 35196 } 35197 func 
rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { 35198 // match: (SHLLconst x [0]) 35199 // cond: 35200 // result: x 35201 for { 35202 if v.AuxInt != 0 { 35203 break 35204 } 35205 x := v.Args[0] 35206 v.reset(OpCopy) 35207 v.Type = x.Type 35208 v.AddArg(x) 35209 return true 35210 } 35211 return false 35212 } 35213 func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { 35214 b := v.Block 35215 _ = b 35216 // match: (SHLQ x (MOVQconst [c])) 35217 // cond: 35218 // result: (SHLQconst [c&63] x) 35219 for { 35220 _ = v.Args[1] 35221 x := v.Args[0] 35222 v_1 := v.Args[1] 35223 if v_1.Op != OpAMD64MOVQconst { 35224 break 35225 } 35226 c := v_1.AuxInt 35227 v.reset(OpAMD64SHLQconst) 35228 v.AuxInt = c & 63 35229 v.AddArg(x) 35230 return true 35231 } 35232 // match: (SHLQ x (MOVLconst [c])) 35233 // cond: 35234 // result: (SHLQconst [c&63] x) 35235 for { 35236 _ = v.Args[1] 35237 x := v.Args[0] 35238 v_1 := v.Args[1] 35239 if v_1.Op != OpAMD64MOVLconst { 35240 break 35241 } 35242 c := v_1.AuxInt 35243 v.reset(OpAMD64SHLQconst) 35244 v.AuxInt = c & 63 35245 v.AddArg(x) 35246 return true 35247 } 35248 // match: (SHLQ x (ADDQconst [c] y)) 35249 // cond: c & 63 == 0 35250 // result: (SHLQ x y) 35251 for { 35252 _ = v.Args[1] 35253 x := v.Args[0] 35254 v_1 := v.Args[1] 35255 if v_1.Op != OpAMD64ADDQconst { 35256 break 35257 } 35258 c := v_1.AuxInt 35259 y := v_1.Args[0] 35260 if !(c&63 == 0) { 35261 break 35262 } 35263 v.reset(OpAMD64SHLQ) 35264 v.AddArg(x) 35265 v.AddArg(y) 35266 return true 35267 } 35268 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y))) 35269 // cond: c & 63 == 0 35270 // result: (SHLQ x (NEGQ <t> y)) 35271 for { 35272 _ = v.Args[1] 35273 x := v.Args[0] 35274 v_1 := v.Args[1] 35275 if v_1.Op != OpAMD64NEGQ { 35276 break 35277 } 35278 t := v_1.Type 35279 v_1_0 := v_1.Args[0] 35280 if v_1_0.Op != OpAMD64ADDQconst { 35281 break 35282 } 35283 c := v_1_0.AuxInt 35284 y := v_1_0.Args[0] 35285 if !(c&63 == 0) { 35286 break 35287 } 35288 v.reset(OpAMD64SHLQ) 35289 v.AddArg(x) 35290 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35291 v0.AddArg(y) 35292 v.AddArg(v0) 35293 return true 35294 } 35295 // match: (SHLQ x (ANDQconst [c] y)) 35296 // cond: c & 63 == 63 35297 // result: (SHLQ x y) 35298 for { 35299 _ = v.Args[1] 35300 x := v.Args[0] 35301 v_1 := v.Args[1] 35302 if v_1.Op != OpAMD64ANDQconst { 35303 break 35304 } 35305 c := v_1.AuxInt 35306 y := v_1.Args[0] 35307 if !(c&63 == 63) { 35308 break 35309 } 35310 v.reset(OpAMD64SHLQ) 35311 v.AddArg(x) 35312 v.AddArg(y) 35313 return true 35314 } 35315 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y))) 35316 // cond: c & 63 == 63 35317 // result: (SHLQ x (NEGQ <t> y)) 35318 for { 35319 _ = v.Args[1] 35320 x := v.Args[0] 35321 v_1 := v.Args[1] 35322 if v_1.Op != OpAMD64NEGQ { 35323 break 35324 } 35325 t := v_1.Type 35326 v_1_0 := v_1.Args[0] 35327 if v_1_0.Op != OpAMD64ANDQconst { 35328 break 35329 } 35330 c := v_1_0.AuxInt 35331 y := v_1_0.Args[0] 35332 if !(c&63 == 63) { 35333 break 35334 } 35335 v.reset(OpAMD64SHLQ) 35336 v.AddArg(x) 35337 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35338 v0.AddArg(y) 35339 v.AddArg(v0) 35340 return true 35341 } 35342 // match: (SHLQ x (ADDLconst [c] y)) 35343 // cond: c & 63 == 0 35344 // result: (SHLQ x y) 35345 for { 35346 _ = v.Args[1] 35347 x := v.Args[0] 35348 v_1 := v.Args[1] 35349 if v_1.Op != OpAMD64ADDLconst { 35350 break 35351 } 35352 c := v_1.AuxInt 35353 y := v_1.Args[0] 35354 if !(c&63 == 0) { 35355 break 35356 } 35357 v.reset(OpAMD64SHLQ) 35358 v.AddArg(x) 35359 v.AddArg(y) 35360 return true 35361 } 
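// The shift-amount rules in this function are sound because the amd64
// SHLQ instruction uses only the low 6 bits of the count register:
// adding a multiple of 64 to the count (ADDQconst with c&63 == 0) or
// masking the count with anything that preserves its low 6 bits
// (ANDQconst with c&63 == 63) cannot change the result, so the extra
// arithmetic can be dropped. A minimal Go sketch of the invariant,
// using an assumed model function for the hardware semantics (plain
// Go << does not mask the count):
//
//	// shlqModel mimics amd64 SHLQ: only the low 6 bits of count matter.
//	func shlqModel(x, count uint64) uint64 {
//		return x << (count & 63)
//	}
//
// For any y, shlqModel(x, y+64) == shlqModel(x, y) and
// shlqModel(x, y&63) == shlqModel(x, y), which is exactly why
// (SHLQ x (ADDQconst [64] y)) may simplify to (SHLQ x y).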
35362 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y))) 35363 // cond: c & 63 == 0 35364 // result: (SHLQ x (NEGL <t> y)) 35365 for { 35366 _ = v.Args[1] 35367 x := v.Args[0] 35368 v_1 := v.Args[1] 35369 if v_1.Op != OpAMD64NEGL { 35370 break 35371 } 35372 t := v_1.Type 35373 v_1_0 := v_1.Args[0] 35374 if v_1_0.Op != OpAMD64ADDLconst { 35375 break 35376 } 35377 c := v_1_0.AuxInt 35378 y := v_1_0.Args[0] 35379 if !(c&63 == 0) { 35380 break 35381 } 35382 v.reset(OpAMD64SHLQ) 35383 v.AddArg(x) 35384 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35385 v0.AddArg(y) 35386 v.AddArg(v0) 35387 return true 35388 } 35389 // match: (SHLQ x (ANDLconst [c] y)) 35390 // cond: c & 63 == 63 35391 // result: (SHLQ x y) 35392 for { 35393 _ = v.Args[1] 35394 x := v.Args[0] 35395 v_1 := v.Args[1] 35396 if v_1.Op != OpAMD64ANDLconst { 35397 break 35398 } 35399 c := v_1.AuxInt 35400 y := v_1.Args[0] 35401 if !(c&63 == 63) { 35402 break 35403 } 35404 v.reset(OpAMD64SHLQ) 35405 v.AddArg(x) 35406 v.AddArg(y) 35407 return true 35408 } 35409 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y))) 35410 // cond: c & 63 == 63 35411 // result: (SHLQ x (NEGL <t> y)) 35412 for { 35413 _ = v.Args[1] 35414 x := v.Args[0] 35415 v_1 := v.Args[1] 35416 if v_1.Op != OpAMD64NEGL { 35417 break 35418 } 35419 t := v_1.Type 35420 v_1_0 := v_1.Args[0] 35421 if v_1_0.Op != OpAMD64ANDLconst { 35422 break 35423 } 35424 c := v_1_0.AuxInt 35425 y := v_1_0.Args[0] 35426 if !(c&63 == 63) { 35427 break 35428 } 35429 v.reset(OpAMD64SHLQ) 35430 v.AddArg(x) 35431 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35432 v0.AddArg(y) 35433 v.AddArg(v0) 35434 return true 35435 } 35436 return false 35437 } 35438 func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { 35439 // match: (SHLQconst x [0]) 35440 // cond: 35441 // result: x 35442 for { 35443 if v.AuxInt != 0 { 35444 break 35445 } 35446 x := v.Args[0] 35447 v.reset(OpCopy) 35448 v.Type = x.Type 35449 v.AddArg(x) 35450 return true 35451 } 35452 return false 35453 } 35454 func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { 35455 // match: (SHRB x (MOVQconst [c])) 35456 // cond: c&31 < 8 35457 // result: (SHRBconst [c&31] x) 35458 for { 35459 _ = v.Args[1] 35460 x := v.Args[0] 35461 v_1 := v.Args[1] 35462 if v_1.Op != OpAMD64MOVQconst { 35463 break 35464 } 35465 c := v_1.AuxInt 35466 if !(c&31 < 8) { 35467 break 35468 } 35469 v.reset(OpAMD64SHRBconst) 35470 v.AuxInt = c & 31 35471 v.AddArg(x) 35472 return true 35473 } 35474 // match: (SHRB x (MOVLconst [c])) 35475 // cond: c&31 < 8 35476 // result: (SHRBconst [c&31] x) 35477 for { 35478 _ = v.Args[1] 35479 x := v.Args[0] 35480 v_1 := v.Args[1] 35481 if v_1.Op != OpAMD64MOVLconst { 35482 break 35483 } 35484 c := v_1.AuxInt 35485 if !(c&31 < 8) { 35486 break 35487 } 35488 v.reset(OpAMD64SHRBconst) 35489 v.AuxInt = c & 31 35490 v.AddArg(x) 35491 return true 35492 } 35493 // match: (SHRB _ (MOVQconst [c])) 35494 // cond: c&31 >= 8 35495 // result: (MOVLconst [0]) 35496 for { 35497 _ = v.Args[1] 35498 v_1 := v.Args[1] 35499 if v_1.Op != OpAMD64MOVQconst { 35500 break 35501 } 35502 c := v_1.AuxInt 35503 if !(c&31 >= 8) { 35504 break 35505 } 35506 v.reset(OpAMD64MOVLconst) 35507 v.AuxInt = 0 35508 return true 35509 } 35510 // match: (SHRB _ (MOVLconst [c])) 35511 // cond: c&31 >= 8 35512 // result: (MOVLconst [0]) 35513 for { 35514 _ = v.Args[1] 35515 v_1 := v.Args[1] 35516 if v_1.Op != OpAMD64MOVLconst { 35517 break 35518 } 35519 c := v_1.AuxInt 35520 if !(c&31 >= 8) { 35521 break 35522 } 35523 v.reset(OpAMD64MOVLconst) 35524 v.AuxInt = 0 35525 return true 35526 } 
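// SHRB operates on a single byte, so the four rules above split on the
// masked count c&31 (x86 masks 8-bit shift counts to 5 bits, just like
// 32-bit ones). A count below 8 becomes the immediate form SHRBconst;
// a count of 8 or more shifts out every bit, so the value folds to the
// constant 0 regardless of the first operand. A small sketch of the
// same case split, under those assumptions:
//
//	// shrbFold mirrors the decision the rules above encode.
//	func shrbFold(x uint8, c int64) uint8 {
//		if c&31 >= 8 {
//			return 0 // all bits shifted out
//		}
//		return x >> uint(c&31)
//	}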
35527 return false 35528 } 35529 func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool { 35530 // match: (SHRBconst x [0]) 35531 // cond: 35532 // result: x 35533 for { 35534 if v.AuxInt != 0 { 35535 break 35536 } 35537 x := v.Args[0] 35538 v.reset(OpCopy) 35539 v.Type = x.Type 35540 v.AddArg(x) 35541 return true 35542 } 35543 return false 35544 } 35545 func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { 35546 b := v.Block 35547 _ = b 35548 // match: (SHRL x (MOVQconst [c])) 35549 // cond: 35550 // result: (SHRLconst [c&31] x) 35551 for { 35552 _ = v.Args[1] 35553 x := v.Args[0] 35554 v_1 := v.Args[1] 35555 if v_1.Op != OpAMD64MOVQconst { 35556 break 35557 } 35558 c := v_1.AuxInt 35559 v.reset(OpAMD64SHRLconst) 35560 v.AuxInt = c & 31 35561 v.AddArg(x) 35562 return true 35563 } 35564 // match: (SHRL x (MOVLconst [c])) 35565 // cond: 35566 // result: (SHRLconst [c&31] x) 35567 for { 35568 _ = v.Args[1] 35569 x := v.Args[0] 35570 v_1 := v.Args[1] 35571 if v_1.Op != OpAMD64MOVLconst { 35572 break 35573 } 35574 c := v_1.AuxInt 35575 v.reset(OpAMD64SHRLconst) 35576 v.AuxInt = c & 31 35577 v.AddArg(x) 35578 return true 35579 } 35580 // match: (SHRL x (ADDQconst [c] y)) 35581 // cond: c & 31 == 0 35582 // result: (SHRL x y) 35583 for { 35584 _ = v.Args[1] 35585 x := v.Args[0] 35586 v_1 := v.Args[1] 35587 if v_1.Op != OpAMD64ADDQconst { 35588 break 35589 } 35590 c := v_1.AuxInt 35591 y := v_1.Args[0] 35592 if !(c&31 == 0) { 35593 break 35594 } 35595 v.reset(OpAMD64SHRL) 35596 v.AddArg(x) 35597 v.AddArg(y) 35598 return true 35599 } 35600 // match: (SHRL x (NEGQ <t> (ADDQconst [c] y))) 35601 // cond: c & 31 == 0 35602 // result: (SHRL x (NEGQ <t> y)) 35603 for { 35604 _ = v.Args[1] 35605 x := v.Args[0] 35606 v_1 := v.Args[1] 35607 if v_1.Op != OpAMD64NEGQ { 35608 break 35609 } 35610 t := v_1.Type 35611 v_1_0 := v_1.Args[0] 35612 if v_1_0.Op != OpAMD64ADDQconst { 35613 break 35614 } 35615 c := v_1_0.AuxInt 35616 y := v_1_0.Args[0] 35617 if !(c&31 == 0) { 35618 break 35619 } 35620 v.reset(OpAMD64SHRL) 35621 v.AddArg(x) 35622 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35623 v0.AddArg(y) 35624 v.AddArg(v0) 35625 return true 35626 } 35627 // match: (SHRL x (ANDQconst [c] y)) 35628 // cond: c & 31 == 31 35629 // result: (SHRL x y) 35630 for { 35631 _ = v.Args[1] 35632 x := v.Args[0] 35633 v_1 := v.Args[1] 35634 if v_1.Op != OpAMD64ANDQconst { 35635 break 35636 } 35637 c := v_1.AuxInt 35638 y := v_1.Args[0] 35639 if !(c&31 == 31) { 35640 break 35641 } 35642 v.reset(OpAMD64SHRL) 35643 v.AddArg(x) 35644 v.AddArg(y) 35645 return true 35646 } 35647 // match: (SHRL x (NEGQ <t> (ANDQconst [c] y))) 35648 // cond: c & 31 == 31 35649 // result: (SHRL x (NEGQ <t> y)) 35650 for { 35651 _ = v.Args[1] 35652 x := v.Args[0] 35653 v_1 := v.Args[1] 35654 if v_1.Op != OpAMD64NEGQ { 35655 break 35656 } 35657 t := v_1.Type 35658 v_1_0 := v_1.Args[0] 35659 if v_1_0.Op != OpAMD64ANDQconst { 35660 break 35661 } 35662 c := v_1_0.AuxInt 35663 y := v_1_0.Args[0] 35664 if !(c&31 == 31) { 35665 break 35666 } 35667 v.reset(OpAMD64SHRL) 35668 v.AddArg(x) 35669 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35670 v0.AddArg(y) 35671 v.AddArg(v0) 35672 return true 35673 } 35674 // match: (SHRL x (ADDLconst [c] y)) 35675 // cond: c & 31 == 0 35676 // result: (SHRL x y) 35677 for { 35678 _ = v.Args[1] 35679 x := v.Args[0] 35680 v_1 := v.Args[1] 35681 if v_1.Op != OpAMD64ADDLconst { 35682 break 35683 } 35684 c := v_1.AuxInt 35685 y := v_1.Args[0] 35686 if !(c&31 == 0) { 35687 break 35688 } 35689 v.reset(OpAMD64SHRL) 35690 v.AddArg(x) 35691 
v.AddArg(y) 35692 return true 35693 } 35694 // match: (SHRL x (NEGL <t> (ADDLconst [c] y))) 35695 // cond: c & 31 == 0 35696 // result: (SHRL x (NEGL <t> y)) 35697 for { 35698 _ = v.Args[1] 35699 x := v.Args[0] 35700 v_1 := v.Args[1] 35701 if v_1.Op != OpAMD64NEGL { 35702 break 35703 } 35704 t := v_1.Type 35705 v_1_0 := v_1.Args[0] 35706 if v_1_0.Op != OpAMD64ADDLconst { 35707 break 35708 } 35709 c := v_1_0.AuxInt 35710 y := v_1_0.Args[0] 35711 if !(c&31 == 0) { 35712 break 35713 } 35714 v.reset(OpAMD64SHRL) 35715 v.AddArg(x) 35716 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35717 v0.AddArg(y) 35718 v.AddArg(v0) 35719 return true 35720 } 35721 // match: (SHRL x (ANDLconst [c] y)) 35722 // cond: c & 31 == 31 35723 // result: (SHRL x y) 35724 for { 35725 _ = v.Args[1] 35726 x := v.Args[0] 35727 v_1 := v.Args[1] 35728 if v_1.Op != OpAMD64ANDLconst { 35729 break 35730 } 35731 c := v_1.AuxInt 35732 y := v_1.Args[0] 35733 if !(c&31 == 31) { 35734 break 35735 } 35736 v.reset(OpAMD64SHRL) 35737 v.AddArg(x) 35738 v.AddArg(y) 35739 return true 35740 } 35741 // match: (SHRL x (NEGL <t> (ANDLconst [c] y))) 35742 // cond: c & 31 == 31 35743 // result: (SHRL x (NEGL <t> y)) 35744 for { 35745 _ = v.Args[1] 35746 x := v.Args[0] 35747 v_1 := v.Args[1] 35748 if v_1.Op != OpAMD64NEGL { 35749 break 35750 } 35751 t := v_1.Type 35752 v_1_0 := v_1.Args[0] 35753 if v_1_0.Op != OpAMD64ANDLconst { 35754 break 35755 } 35756 c := v_1_0.AuxInt 35757 y := v_1_0.Args[0] 35758 if !(c&31 == 31) { 35759 break 35760 } 35761 v.reset(OpAMD64SHRL) 35762 v.AddArg(x) 35763 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35764 v0.AddArg(y) 35765 v.AddArg(v0) 35766 return true 35767 } 35768 return false 35769 } 35770 func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { 35771 // match: (SHRLconst x [0]) 35772 // cond: 35773 // result: x 35774 for { 35775 if v.AuxInt != 0 { 35776 break 35777 } 35778 x := v.Args[0] 35779 v.reset(OpCopy) 35780 v.Type = x.Type 35781 v.AddArg(x) 35782 return true 35783 } 35784 return false 35785 } 35786 func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { 35787 b := v.Block 35788 _ = b 35789 // match: (SHRQ x (MOVQconst [c])) 35790 // cond: 35791 // result: (SHRQconst [c&63] x) 35792 for { 35793 _ = v.Args[1] 35794 x := v.Args[0] 35795 v_1 := v.Args[1] 35796 if v_1.Op != OpAMD64MOVQconst { 35797 break 35798 } 35799 c := v_1.AuxInt 35800 v.reset(OpAMD64SHRQconst) 35801 v.AuxInt = c & 63 35802 v.AddArg(x) 35803 return true 35804 } 35805 // match: (SHRQ x (MOVLconst [c])) 35806 // cond: 35807 // result: (SHRQconst [c&63] x) 35808 for { 35809 _ = v.Args[1] 35810 x := v.Args[0] 35811 v_1 := v.Args[1] 35812 if v_1.Op != OpAMD64MOVLconst { 35813 break 35814 } 35815 c := v_1.AuxInt 35816 v.reset(OpAMD64SHRQconst) 35817 v.AuxInt = c & 63 35818 v.AddArg(x) 35819 return true 35820 } 35821 // match: (SHRQ x (ADDQconst [c] y)) 35822 // cond: c & 63 == 0 35823 // result: (SHRQ x y) 35824 for { 35825 _ = v.Args[1] 35826 x := v.Args[0] 35827 v_1 := v.Args[1] 35828 if v_1.Op != OpAMD64ADDQconst { 35829 break 35830 } 35831 c := v_1.AuxInt 35832 y := v_1.Args[0] 35833 if !(c&63 == 0) { 35834 break 35835 } 35836 v.reset(OpAMD64SHRQ) 35837 v.AddArg(x) 35838 v.AddArg(y) 35839 return true 35840 } 35841 // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y))) 35842 // cond: c & 63 == 0 35843 // result: (SHRQ x (NEGQ <t> y)) 35844 for { 35845 _ = v.Args[1] 35846 x := v.Args[0] 35847 v_1 := v.Args[1] 35848 if v_1.Op != OpAMD64NEGQ { 35849 break 35850 } 35851 t := v_1.Type 35852 v_1_0 := v_1.Args[0] 35853 if v_1_0.Op != OpAMD64ADDQconst { 
35854 break 35855 } 35856 c := v_1_0.AuxInt 35857 y := v_1_0.Args[0] 35858 if !(c&63 == 0) { 35859 break 35860 } 35861 v.reset(OpAMD64SHRQ) 35862 v.AddArg(x) 35863 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35864 v0.AddArg(y) 35865 v.AddArg(v0) 35866 return true 35867 } 35868 // match: (SHRQ x (ANDQconst [c] y)) 35869 // cond: c & 63 == 63 35870 // result: (SHRQ x y) 35871 for { 35872 _ = v.Args[1] 35873 x := v.Args[0] 35874 v_1 := v.Args[1] 35875 if v_1.Op != OpAMD64ANDQconst { 35876 break 35877 } 35878 c := v_1.AuxInt 35879 y := v_1.Args[0] 35880 if !(c&63 == 63) { 35881 break 35882 } 35883 v.reset(OpAMD64SHRQ) 35884 v.AddArg(x) 35885 v.AddArg(y) 35886 return true 35887 } 35888 // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y))) 35889 // cond: c & 63 == 63 35890 // result: (SHRQ x (NEGQ <t> y)) 35891 for { 35892 _ = v.Args[1] 35893 x := v.Args[0] 35894 v_1 := v.Args[1] 35895 if v_1.Op != OpAMD64NEGQ { 35896 break 35897 } 35898 t := v_1.Type 35899 v_1_0 := v_1.Args[0] 35900 if v_1_0.Op != OpAMD64ANDQconst { 35901 break 35902 } 35903 c := v_1_0.AuxInt 35904 y := v_1_0.Args[0] 35905 if !(c&63 == 63) { 35906 break 35907 } 35908 v.reset(OpAMD64SHRQ) 35909 v.AddArg(x) 35910 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35911 v0.AddArg(y) 35912 v.AddArg(v0) 35913 return true 35914 } 35915 // match: (SHRQ x (ADDLconst [c] y)) 35916 // cond: c & 63 == 0 35917 // result: (SHRQ x y) 35918 for { 35919 _ = v.Args[1] 35920 x := v.Args[0] 35921 v_1 := v.Args[1] 35922 if v_1.Op != OpAMD64ADDLconst { 35923 break 35924 } 35925 c := v_1.AuxInt 35926 y := v_1.Args[0] 35927 if !(c&63 == 0) { 35928 break 35929 } 35930 v.reset(OpAMD64SHRQ) 35931 v.AddArg(x) 35932 v.AddArg(y) 35933 return true 35934 } 35935 // match: (SHRQ x (NEGL <t> (ADDLconst [c] y))) 35936 // cond: c & 63 == 0 35937 // result: (SHRQ x (NEGL <t> y)) 35938 for { 35939 _ = v.Args[1] 35940 x := v.Args[0] 35941 v_1 := v.Args[1] 35942 if v_1.Op != OpAMD64NEGL { 35943 break 35944 } 35945 t := v_1.Type 35946 v_1_0 := v_1.Args[0] 35947 if v_1_0.Op != OpAMD64ADDLconst { 35948 break 35949 } 35950 c := v_1_0.AuxInt 35951 y := v_1_0.Args[0] 35952 if !(c&63 == 0) { 35953 break 35954 } 35955 v.reset(OpAMD64SHRQ) 35956 v.AddArg(x) 35957 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35958 v0.AddArg(y) 35959 v.AddArg(v0) 35960 return true 35961 } 35962 // match: (SHRQ x (ANDLconst [c] y)) 35963 // cond: c & 63 == 63 35964 // result: (SHRQ x y) 35965 for { 35966 _ = v.Args[1] 35967 x := v.Args[0] 35968 v_1 := v.Args[1] 35969 if v_1.Op != OpAMD64ANDLconst { 35970 break 35971 } 35972 c := v_1.AuxInt 35973 y := v_1.Args[0] 35974 if !(c&63 == 63) { 35975 break 35976 } 35977 v.reset(OpAMD64SHRQ) 35978 v.AddArg(x) 35979 v.AddArg(y) 35980 return true 35981 } 35982 // match: (SHRQ x (NEGL <t> (ANDLconst [c] y))) 35983 // cond: c & 63 == 63 35984 // result: (SHRQ x (NEGL <t> y)) 35985 for { 35986 _ = v.Args[1] 35987 x := v.Args[0] 35988 v_1 := v.Args[1] 35989 if v_1.Op != OpAMD64NEGL { 35990 break 35991 } 35992 t := v_1.Type 35993 v_1_0 := v_1.Args[0] 35994 if v_1_0.Op != OpAMD64ANDLconst { 35995 break 35996 } 35997 c := v_1_0.AuxInt 35998 y := v_1_0.Args[0] 35999 if !(c&63 == 63) { 36000 break 36001 } 36002 v.reset(OpAMD64SHRQ) 36003 v.AddArg(x) 36004 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 36005 v0.AddArg(y) 36006 v.AddArg(v0) 36007 return true 36008 } 36009 return false 36010 } 36011 func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool { 36012 // match: (SHRQconst x [0]) 36013 // cond: 36014 // result: x 36015 for { 36016 if v.AuxInt != 0 { 36017 break 36018 } 36019 x := 
v.Args[0] 36020 v.reset(OpCopy) 36021 v.Type = x.Type 36022 v.AddArg(x) 36023 return true 36024 } 36025 return false 36026 } 36027 func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { 36028 // match: (SHRW x (MOVQconst [c])) 36029 // cond: c&31 < 16 36030 // result: (SHRWconst [c&31] x) 36031 for { 36032 _ = v.Args[1] 36033 x := v.Args[0] 36034 v_1 := v.Args[1] 36035 if v_1.Op != OpAMD64MOVQconst { 36036 break 36037 } 36038 c := v_1.AuxInt 36039 if !(c&31 < 16) { 36040 break 36041 } 36042 v.reset(OpAMD64SHRWconst) 36043 v.AuxInt = c & 31 36044 v.AddArg(x) 36045 return true 36046 } 36047 // match: (SHRW x (MOVLconst [c])) 36048 // cond: c&31 < 16 36049 // result: (SHRWconst [c&31] x) 36050 for { 36051 _ = v.Args[1] 36052 x := v.Args[0] 36053 v_1 := v.Args[1] 36054 if v_1.Op != OpAMD64MOVLconst { 36055 break 36056 } 36057 c := v_1.AuxInt 36058 if !(c&31 < 16) { 36059 break 36060 } 36061 v.reset(OpAMD64SHRWconst) 36062 v.AuxInt = c & 31 36063 v.AddArg(x) 36064 return true 36065 } 36066 // match: (SHRW _ (MOVQconst [c])) 36067 // cond: c&31 >= 16 36068 // result: (MOVLconst [0]) 36069 for { 36070 _ = v.Args[1] 36071 v_1 := v.Args[1] 36072 if v_1.Op != OpAMD64MOVQconst { 36073 break 36074 } 36075 c := v_1.AuxInt 36076 if !(c&31 >= 16) { 36077 break 36078 } 36079 v.reset(OpAMD64MOVLconst) 36080 v.AuxInt = 0 36081 return true 36082 } 36083 // match: (SHRW _ (MOVLconst [c])) 36084 // cond: c&31 >= 16 36085 // result: (MOVLconst [0]) 36086 for { 36087 _ = v.Args[1] 36088 v_1 := v.Args[1] 36089 if v_1.Op != OpAMD64MOVLconst { 36090 break 36091 } 36092 c := v_1.AuxInt 36093 if !(c&31 >= 16) { 36094 break 36095 } 36096 v.reset(OpAMD64MOVLconst) 36097 v.AuxInt = 0 36098 return true 36099 } 36100 return false 36101 } 36102 func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool { 36103 // match: (SHRWconst x [0]) 36104 // cond: 36105 // result: x 36106 for { 36107 if v.AuxInt != 0 { 36108 break 36109 } 36110 x := v.Args[0] 36111 v.reset(OpCopy) 36112 v.Type = x.Type 36113 v.AddArg(x) 36114 return true 36115 } 36116 return false 36117 } 36118 func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { 36119 b := v.Block 36120 _ = b 36121 // match: (SUBL x (MOVLconst [c])) 36122 // cond: 36123 // result: (SUBLconst x [c]) 36124 for { 36125 _ = v.Args[1] 36126 x := v.Args[0] 36127 v_1 := v.Args[1] 36128 if v_1.Op != OpAMD64MOVLconst { 36129 break 36130 } 36131 c := v_1.AuxInt 36132 v.reset(OpAMD64SUBLconst) 36133 v.AuxInt = c 36134 v.AddArg(x) 36135 return true 36136 } 36137 // match: (SUBL (MOVLconst [c]) x) 36138 // cond: 36139 // result: (NEGL (SUBLconst <v.Type> x [c])) 36140 for { 36141 _ = v.Args[1] 36142 v_0 := v.Args[0] 36143 if v_0.Op != OpAMD64MOVLconst { 36144 break 36145 } 36146 c := v_0.AuxInt 36147 x := v.Args[1] 36148 v.reset(OpAMD64NEGL) 36149 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) 36150 v0.AuxInt = c 36151 v0.AddArg(x) 36152 v.AddArg(v0) 36153 return true 36154 } 36155 // match: (SUBL x x) 36156 // cond: 36157 // result: (MOVLconst [0]) 36158 for { 36159 _ = v.Args[1] 36160 x := v.Args[0] 36161 if x != v.Args[1] { 36162 break 36163 } 36164 v.reset(OpAMD64MOVLconst) 36165 v.AuxInt = 0 36166 return true 36167 } 36168 // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) 36169 // cond: canMergeLoad(v, l, x) && clobber(l) 36170 // result: (SUBLmem x [off] {sym} ptr mem) 36171 for { 36172 _ = v.Args[1] 36173 x := v.Args[0] 36174 l := v.Args[1] 36175 if l.Op != OpAMD64MOVLload { 36176 break 36177 } 36178 off := l.AuxInt 36179 sym := l.Aux 36180 _ = l.Args[1] 36181 ptr := l.Args[0] 36182 
mem := l.Args[1] 36183 if !(canMergeLoad(v, l, x) && clobber(l)) { 36184 break 36185 } 36186 v.reset(OpAMD64SUBLmem) 36187 v.AuxInt = off 36188 v.Aux = sym 36189 v.AddArg(x) 36190 v.AddArg(ptr) 36191 v.AddArg(mem) 36192 return true 36193 } 36194 return false 36195 } 36196 func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { 36197 // match: (SUBLconst [c] x) 36198 // cond: int32(c) == 0 36199 // result: x 36200 for { 36201 c := v.AuxInt 36202 x := v.Args[0] 36203 if !(int32(c) == 0) { 36204 break 36205 } 36206 v.reset(OpCopy) 36207 v.Type = x.Type 36208 v.AddArg(x) 36209 return true 36210 } 36211 // match: (SUBLconst [c] x) 36212 // cond: 36213 // result: (ADDLconst [int64(int32(-c))] x) 36214 for { 36215 c := v.AuxInt 36216 x := v.Args[0] 36217 v.reset(OpAMD64ADDLconst) 36218 v.AuxInt = int64(int32(-c)) 36219 v.AddArg(x) 36220 return true 36221 } 36222 } 36223 func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { 36224 b := v.Block 36225 _ = b 36226 // match: (SUBQ x (MOVQconst [c])) 36227 // cond: is32Bit(c) 36228 // result: (SUBQconst x [c]) 36229 for { 36230 _ = v.Args[1] 36231 x := v.Args[0] 36232 v_1 := v.Args[1] 36233 if v_1.Op != OpAMD64MOVQconst { 36234 break 36235 } 36236 c := v_1.AuxInt 36237 if !(is32Bit(c)) { 36238 break 36239 } 36240 v.reset(OpAMD64SUBQconst) 36241 v.AuxInt = c 36242 v.AddArg(x) 36243 return true 36244 } 36245 // match: (SUBQ (MOVQconst [c]) x) 36246 // cond: is32Bit(c) 36247 // result: (NEGQ (SUBQconst <v.Type> x [c])) 36248 for { 36249 _ = v.Args[1] 36250 v_0 := v.Args[0] 36251 if v_0.Op != OpAMD64MOVQconst { 36252 break 36253 } 36254 c := v_0.AuxInt 36255 x := v.Args[1] 36256 if !(is32Bit(c)) { 36257 break 36258 } 36259 v.reset(OpAMD64NEGQ) 36260 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 36261 v0.AuxInt = c 36262 v0.AddArg(x) 36263 v.AddArg(v0) 36264 return true 36265 } 36266 // match: (SUBQ x x) 36267 // cond: 36268 // result: (MOVQconst [0]) 36269 for { 36270 _ = v.Args[1] 36271 x := v.Args[0] 36272 if x != v.Args[1] { 36273 break 36274 } 36275 v.reset(OpAMD64MOVQconst) 36276 v.AuxInt = 0 36277 return true 36278 } 36279 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) 36280 // cond: canMergeLoad(v, l, x) && clobber(l) 36281 // result: (SUBQmem x [off] {sym} ptr mem) 36282 for { 36283 _ = v.Args[1] 36284 x := v.Args[0] 36285 l := v.Args[1] 36286 if l.Op != OpAMD64MOVQload { 36287 break 36288 } 36289 off := l.AuxInt 36290 sym := l.Aux 36291 _ = l.Args[1] 36292 ptr := l.Args[0] 36293 mem := l.Args[1] 36294 if !(canMergeLoad(v, l, x) && clobber(l)) { 36295 break 36296 } 36297 v.reset(OpAMD64SUBQmem) 36298 v.AuxInt = off 36299 v.Aux = sym 36300 v.AddArg(x) 36301 v.AddArg(ptr) 36302 v.AddArg(mem) 36303 return true 36304 } 36305 return false 36306 } 36307 func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { 36308 // match: (SUBQconst [0] x) 36309 // cond: 36310 // result: x 36311 for { 36312 if v.AuxInt != 0 { 36313 break 36314 } 36315 x := v.Args[0] 36316 v.reset(OpCopy) 36317 v.Type = x.Type 36318 v.AddArg(x) 36319 return true 36320 } 36321 // match: (SUBQconst [c] x) 36322 // cond: c != -(1<<31) 36323 // result: (ADDQconst [-c] x) 36324 for { 36325 c := v.AuxInt 36326 x := v.Args[0] 36327 if !(c != -(1 << 31)) { 36328 break 36329 } 36330 v.reset(OpAMD64ADDQconst) 36331 v.AuxInt = -c 36332 v.AddArg(x) 36333 return true 36334 } 36335 // match: (SUBQconst (MOVQconst [d]) [c]) 36336 // cond: 36337 // result: (MOVQconst [d-c]) 36338 for { 36339 c := v.AuxInt 36340 v_0 := v.Args[0] 36341 if v_0.Op != OpAMD64MOVQconst { 36342 break 36343 } 
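// Past this guard both operands are known constants, so the rewrite
// below folds the subtraction at compile time:
// (SUBQconst [c] (MOVQconst [d])) becomes (MOVQconst [d-c]), with d-c
// wrapping in int64 exactly as 64-bit hardware subtraction would.
// For example (an illustrative value, not from the source):
//
//	// (SUBQconst [5] (MOVQconst [8])) -> (MOVQconst [3])
//
// so the expression never reaches the assembler as a SUBQ at all.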
36344 d := v_0.AuxInt 36345 v.reset(OpAMD64MOVQconst) 36346 v.AuxInt = d - c 36347 return true 36348 } 36349 // match: (SUBQconst (SUBQconst x [d]) [c]) 36350 // cond: is32Bit(-c-d) 36351 // result: (ADDQconst [-c-d] x) 36352 for { 36353 c := v.AuxInt 36354 v_0 := v.Args[0] 36355 if v_0.Op != OpAMD64SUBQconst { 36356 break 36357 } 36358 d := v_0.AuxInt 36359 x := v_0.Args[0] 36360 if !(is32Bit(-c - d)) { 36361 break 36362 } 36363 v.reset(OpAMD64ADDQconst) 36364 v.AuxInt = -c - d 36365 v.AddArg(x) 36366 return true 36367 } 36368 return false 36369 } 36370 func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { 36371 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) 36372 // cond: canMergeLoad(v, l, x) && clobber(l) 36373 // result: (SUBSDmem x [off] {sym} ptr mem) 36374 for { 36375 _ = v.Args[1] 36376 x := v.Args[0] 36377 l := v.Args[1] 36378 if l.Op != OpAMD64MOVSDload { 36379 break 36380 } 36381 off := l.AuxInt 36382 sym := l.Aux 36383 _ = l.Args[1] 36384 ptr := l.Args[0] 36385 mem := l.Args[1] 36386 if !(canMergeLoad(v, l, x) && clobber(l)) { 36387 break 36388 } 36389 v.reset(OpAMD64SUBSDmem) 36390 v.AuxInt = off 36391 v.Aux = sym 36392 v.AddArg(x) 36393 v.AddArg(ptr) 36394 v.AddArg(mem) 36395 return true 36396 } 36397 return false 36398 } 36399 func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { 36400 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) 36401 // cond: canMergeLoad(v, l, x) && clobber(l) 36402 // result: (SUBSSmem x [off] {sym} ptr mem) 36403 for { 36404 _ = v.Args[1] 36405 x := v.Args[0] 36406 l := v.Args[1] 36407 if l.Op != OpAMD64MOVSSload { 36408 break 36409 } 36410 off := l.AuxInt 36411 sym := l.Aux 36412 _ = l.Args[1] 36413 ptr := l.Args[0] 36414 mem := l.Args[1] 36415 if !(canMergeLoad(v, l, x) && clobber(l)) { 36416 break 36417 } 36418 v.reset(OpAMD64SUBSSmem) 36419 v.AuxInt = off 36420 v.Aux = sym 36421 v.AddArg(x) 36422 v.AddArg(ptr) 36423 v.AddArg(mem) 36424 return true 36425 } 36426 return false 36427 } 36428 func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { 36429 // match: (TESTB (MOVLconst [c]) x) 36430 // cond: 36431 // result: (TESTBconst [c] x) 36432 for { 36433 _ = v.Args[1] 36434 v_0 := v.Args[0] 36435 if v_0.Op != OpAMD64MOVLconst { 36436 break 36437 } 36438 c := v_0.AuxInt 36439 x := v.Args[1] 36440 v.reset(OpAMD64TESTBconst) 36441 v.AuxInt = c 36442 v.AddArg(x) 36443 return true 36444 } 36445 // match: (TESTB x (MOVLconst [c])) 36446 // cond: 36447 // result: (TESTBconst [c] x) 36448 for { 36449 _ = v.Args[1] 36450 x := v.Args[0] 36451 v_1 := v.Args[1] 36452 if v_1.Op != OpAMD64MOVLconst { 36453 break 36454 } 36455 c := v_1.AuxInt 36456 v.reset(OpAMD64TESTBconst) 36457 v.AuxInt = c 36458 v.AddArg(x) 36459 return true 36460 } 36461 return false 36462 } 36463 func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { 36464 // match: (TESTL (MOVLconst [c]) x) 36465 // cond: 36466 // result: (TESTLconst [c] x) 36467 for { 36468 _ = v.Args[1] 36469 v_0 := v.Args[0] 36470 if v_0.Op != OpAMD64MOVLconst { 36471 break 36472 } 36473 c := v_0.AuxInt 36474 x := v.Args[1] 36475 v.reset(OpAMD64TESTLconst) 36476 v.AuxInt = c 36477 v.AddArg(x) 36478 return true 36479 } 36480 // match: (TESTL x (MOVLconst [c])) 36481 // cond: 36482 // result: (TESTLconst [c] x) 36483 for { 36484 _ = v.Args[1] 36485 x := v.Args[0] 36486 v_1 := v.Args[1] 36487 if v_1.Op != OpAMD64MOVLconst { 36488 break 36489 } 36490 c := v_1.AuxInt 36491 v.reset(OpAMD64TESTLconst) 36492 v.AuxInt = c 36493 v.AddArg(x) 36494 return true 36495 } 36496 return false 36497 } 36498 func 
rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { 36499 // match: (TESTQ (MOVQconst [c]) x) 36500 // cond: is32Bit(c) 36501 // result: (TESTQconst [c] x) 36502 for { 36503 _ = v.Args[1] 36504 v_0 := v.Args[0] 36505 if v_0.Op != OpAMD64MOVQconst { 36506 break 36507 } 36508 c := v_0.AuxInt 36509 x := v.Args[1] 36510 if !(is32Bit(c)) { 36511 break 36512 } 36513 v.reset(OpAMD64TESTQconst) 36514 v.AuxInt = c 36515 v.AddArg(x) 36516 return true 36517 } 36518 // match: (TESTQ x (MOVQconst [c])) 36519 // cond: is32Bit(c) 36520 // result: (TESTQconst [c] x) 36521 for { 36522 _ = v.Args[1] 36523 x := v.Args[0] 36524 v_1 := v.Args[1] 36525 if v_1.Op != OpAMD64MOVQconst { 36526 break 36527 } 36528 c := v_1.AuxInt 36529 if !(is32Bit(c)) { 36530 break 36531 } 36532 v.reset(OpAMD64TESTQconst) 36533 v.AuxInt = c 36534 v.AddArg(x) 36535 return true 36536 } 36537 return false 36538 } 36539 func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { 36540 // match: (TESTW (MOVLconst [c]) x) 36541 // cond: 36542 // result: (TESTWconst [c] x) 36543 for { 36544 _ = v.Args[1] 36545 v_0 := v.Args[0] 36546 if v_0.Op != OpAMD64MOVLconst { 36547 break 36548 } 36549 c := v_0.AuxInt 36550 x := v.Args[1] 36551 v.reset(OpAMD64TESTWconst) 36552 v.AuxInt = c 36553 v.AddArg(x) 36554 return true 36555 } 36556 // match: (TESTW x (MOVLconst [c])) 36557 // cond: 36558 // result: (TESTWconst [c] x) 36559 for { 36560 _ = v.Args[1] 36561 x := v.Args[0] 36562 v_1 := v.Args[1] 36563 if v_1.Op != OpAMD64MOVLconst { 36564 break 36565 } 36566 c := v_1.AuxInt 36567 v.reset(OpAMD64TESTWconst) 36568 v.AuxInt = c 36569 v.AddArg(x) 36570 return true 36571 } 36572 return false 36573 } 36574 func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { 36575 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 36576 // cond: is32Bit(off1+off2) 36577 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 36578 for { 36579 off1 := v.AuxInt 36580 sym := v.Aux 36581 _ = v.Args[2] 36582 val := v.Args[0] 36583 v_1 := v.Args[1] 36584 if v_1.Op != OpAMD64ADDQconst { 36585 break 36586 } 36587 off2 := v_1.AuxInt 36588 ptr := v_1.Args[0] 36589 mem := v.Args[2] 36590 if !(is32Bit(off1 + off2)) { 36591 break 36592 } 36593 v.reset(OpAMD64XADDLlock) 36594 v.AuxInt = off1 + off2 36595 v.Aux = sym 36596 v.AddArg(val) 36597 v.AddArg(ptr) 36598 v.AddArg(mem) 36599 return true 36600 } 36601 return false 36602 } 36603 func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { 36604 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 36605 // cond: is32Bit(off1+off2) 36606 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 36607 for { 36608 off1 := v.AuxInt 36609 sym := v.Aux 36610 _ = v.Args[2] 36611 val := v.Args[0] 36612 v_1 := v.Args[1] 36613 if v_1.Op != OpAMD64ADDQconst { 36614 break 36615 } 36616 off2 := v_1.AuxInt 36617 ptr := v_1.Args[0] 36618 mem := v.Args[2] 36619 if !(is32Bit(off1 + off2)) { 36620 break 36621 } 36622 v.reset(OpAMD64XADDQlock) 36623 v.AuxInt = off1 + off2 36624 v.Aux = sym 36625 v.AddArg(val) 36626 v.AddArg(ptr) 36627 v.AddArg(mem) 36628 return true 36629 } 36630 return false 36631 } 36632 func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { 36633 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 36634 // cond: is32Bit(off1+off2) 36635 // result: (XCHGL [off1+off2] {sym} val ptr mem) 36636 for { 36637 off1 := v.AuxInt 36638 sym := v.Aux 36639 _ = v.Args[2] 36640 val := v.Args[0] 36641 v_1 := v.Args[1] 36642 if v_1.Op != OpAMD64ADDQconst { 36643 break 36644 } 36645 off2 := v_1.AuxInt 36646 ptr := 
v_1.Args[0] 36647 mem := v.Args[2] 36648 if !(is32Bit(off1 + off2)) { 36649 break 36650 } 36651 v.reset(OpAMD64XCHGL) 36652 v.AuxInt = off1 + off2 36653 v.Aux = sym 36654 v.AddArg(val) 36655 v.AddArg(ptr) 36656 v.AddArg(mem) 36657 return true 36658 } 36659 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 36660 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 36661 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 36662 for { 36663 off1 := v.AuxInt 36664 sym1 := v.Aux 36665 _ = v.Args[2] 36666 val := v.Args[0] 36667 v_1 := v.Args[1] 36668 if v_1.Op != OpAMD64LEAQ { 36669 break 36670 } 36671 off2 := v_1.AuxInt 36672 sym2 := v_1.Aux 36673 ptr := v_1.Args[0] 36674 mem := v.Args[2] 36675 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 36676 break 36677 } 36678 v.reset(OpAMD64XCHGL) 36679 v.AuxInt = off1 + off2 36680 v.Aux = mergeSym(sym1, sym2) 36681 v.AddArg(val) 36682 v.AddArg(ptr) 36683 v.AddArg(mem) 36684 return true 36685 } 36686 return false 36687 } 36688 func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { 36689 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 36690 // cond: is32Bit(off1+off2) 36691 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 36692 for { 36693 off1 := v.AuxInt 36694 sym := v.Aux 36695 _ = v.Args[2] 36696 val := v.Args[0] 36697 v_1 := v.Args[1] 36698 if v_1.Op != OpAMD64ADDQconst { 36699 break 36700 } 36701 off2 := v_1.AuxInt 36702 ptr := v_1.Args[0] 36703 mem := v.Args[2] 36704 if !(is32Bit(off1 + off2)) { 36705 break 36706 } 36707 v.reset(OpAMD64XCHGQ) 36708 v.AuxInt = off1 + off2 36709 v.Aux = sym 36710 v.AddArg(val) 36711 v.AddArg(ptr) 36712 v.AddArg(mem) 36713 return true 36714 } 36715 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 36716 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 36717 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 36718 for { 36719 off1 := v.AuxInt 36720 sym1 := v.Aux 36721 _ = v.Args[2] 36722 val := v.Args[0] 36723 v_1 := v.Args[1] 36724 if v_1.Op != OpAMD64LEAQ { 36725 break 36726 } 36727 off2 := v_1.AuxInt 36728 sym2 := v_1.Aux 36729 ptr := v_1.Args[0] 36730 mem := v.Args[2] 36731 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 36732 break 36733 } 36734 v.reset(OpAMD64XCHGQ) 36735 v.AuxInt = off1 + off2 36736 v.Aux = mergeSym(sym1, sym2) 36737 v.AddArg(val) 36738 v.AddArg(ptr) 36739 v.AddArg(mem) 36740 return true 36741 } 36742 return false 36743 } 36744 func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { 36745 // match: (XORL x (MOVLconst [c])) 36746 // cond: 36747 // result: (XORLconst [c] x) 36748 for { 36749 _ = v.Args[1] 36750 x := v.Args[0] 36751 v_1 := v.Args[1] 36752 if v_1.Op != OpAMD64MOVLconst { 36753 break 36754 } 36755 c := v_1.AuxInt 36756 v.reset(OpAMD64XORLconst) 36757 v.AuxInt = c 36758 v.AddArg(x) 36759 return true 36760 } 36761 // match: (XORL (MOVLconst [c]) x) 36762 // cond: 36763 // result: (XORLconst [c] x) 36764 for { 36765 _ = v.Args[1] 36766 v_0 := v.Args[0] 36767 if v_0.Op != OpAMD64MOVLconst { 36768 break 36769 } 36770 c := v_0.AuxInt 36771 x := v.Args[1] 36772 v.reset(OpAMD64XORLconst) 36773 v.AuxInt = c 36774 v.AddArg(x) 36775 return true 36776 } 36777 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) 36778 // cond: d==32-c 36779 // result: (ROLLconst x [c]) 36780 for { 36781 _ = v.Args[1] 36782 v_0 := v.Args[0] 36783 if v_0.Op != OpAMD64SHLLconst { 36784 break 36785 } 36786 c := v_0.AuxInt 36787 x := v_0.Args[0] 36788 v_1 := 
v.Args[1] 36789 if v_1.Op != OpAMD64SHRLconst { 36790 break 36791 } 36792 d := v_1.AuxInt 36793 if x != v_1.Args[0] { 36794 break 36795 } 36796 if !(d == 32-c) { 36797 break 36798 } 36799 v.reset(OpAMD64ROLLconst) 36800 v.AuxInt = c 36801 v.AddArg(x) 36802 return true 36803 } 36804 // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) 36805 // cond: d==32-c 36806 // result: (ROLLconst x [c]) 36807 for { 36808 _ = v.Args[1] 36809 v_0 := v.Args[0] 36810 if v_0.Op != OpAMD64SHRLconst { 36811 break 36812 } 36813 d := v_0.AuxInt 36814 x := v_0.Args[0] 36815 v_1 := v.Args[1] 36816 if v_1.Op != OpAMD64SHLLconst { 36817 break 36818 } 36819 c := v_1.AuxInt 36820 if x != v_1.Args[0] { 36821 break 36822 } 36823 if !(d == 32-c) { 36824 break 36825 } 36826 v.reset(OpAMD64ROLLconst) 36827 v.AuxInt = c 36828 v.AddArg(x) 36829 return true 36830 } 36831 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 36832 // cond: d==16-c && c < 16 && t.Size() == 2 36833 // result: (ROLWconst x [c]) 36834 for { 36835 t := v.Type 36836 _ = v.Args[1] 36837 v_0 := v.Args[0] 36838 if v_0.Op != OpAMD64SHLLconst { 36839 break 36840 } 36841 c := v_0.AuxInt 36842 x := v_0.Args[0] 36843 v_1 := v.Args[1] 36844 if v_1.Op != OpAMD64SHRWconst { 36845 break 36846 } 36847 d := v_1.AuxInt 36848 if x != v_1.Args[0] { 36849 break 36850 } 36851 if !(d == 16-c && c < 16 && t.Size() == 2) { 36852 break 36853 } 36854 v.reset(OpAMD64ROLWconst) 36855 v.AuxInt = c 36856 v.AddArg(x) 36857 return true 36858 } 36859 // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 36860 // cond: d==16-c && c < 16 && t.Size() == 2 36861 // result: (ROLWconst x [c]) 36862 for { 36863 t := v.Type 36864 _ = v.Args[1] 36865 v_0 := v.Args[0] 36866 if v_0.Op != OpAMD64SHRWconst { 36867 break 36868 } 36869 d := v_0.AuxInt 36870 x := v_0.Args[0] 36871 v_1 := v.Args[1] 36872 if v_1.Op != OpAMD64SHLLconst { 36873 break 36874 } 36875 c := v_1.AuxInt 36876 if x != v_1.Args[0] { 36877 break 36878 } 36879 if !(d == 16-c && c < 16 && t.Size() == 2) { 36880 break 36881 } 36882 v.reset(OpAMD64ROLWconst) 36883 v.AuxInt = c 36884 v.AddArg(x) 36885 return true 36886 } 36887 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 36888 // cond: d==8-c && c < 8 && t.Size() == 1 36889 // result: (ROLBconst x [c]) 36890 for { 36891 t := v.Type 36892 _ = v.Args[1] 36893 v_0 := v.Args[0] 36894 if v_0.Op != OpAMD64SHLLconst { 36895 break 36896 } 36897 c := v_0.AuxInt 36898 x := v_0.Args[0] 36899 v_1 := v.Args[1] 36900 if v_1.Op != OpAMD64SHRBconst { 36901 break 36902 } 36903 d := v_1.AuxInt 36904 if x != v_1.Args[0] { 36905 break 36906 } 36907 if !(d == 8-c && c < 8 && t.Size() == 1) { 36908 break 36909 } 36910 v.reset(OpAMD64ROLBconst) 36911 v.AuxInt = c 36912 v.AddArg(x) 36913 return true 36914 } 36915 // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 36916 // cond: d==8-c && c < 8 && t.Size() == 1 36917 // result: (ROLBconst x [c]) 36918 for { 36919 t := v.Type 36920 _ = v.Args[1] 36921 v_0 := v.Args[0] 36922 if v_0.Op != OpAMD64SHRBconst { 36923 break 36924 } 36925 d := v_0.AuxInt 36926 x := v_0.Args[0] 36927 v_1 := v.Args[1] 36928 if v_1.Op != OpAMD64SHLLconst { 36929 break 36930 } 36931 c := v_1.AuxInt 36932 if x != v_1.Args[0] { 36933 break 36934 } 36935 if !(d == 8-c && c < 8 && t.Size() == 1) { 36936 break 36937 } 36938 v.reset(OpAMD64ROLBconst) 36939 v.AuxInt = c 36940 v.AddArg(x) 36941 return true 36942 } 36943 // match: (XORL x x) 36944 // cond: 36945 // result: (MOVLconst [0]) 36946 for { 36947 _ = v.Args[1] 36948 x := v.Args[0] 36949 if x != v.Args[1] { 
36950 break 36951 } 36952 v.reset(OpAMD64MOVLconst) 36953 v.AuxInt = 0 36954 return true 36955 } 36956 // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) 36957 // cond: canMergeLoad(v, l, x) && clobber(l) 36958 // result: (XORLmem x [off] {sym} ptr mem) 36959 for { 36960 _ = v.Args[1] 36961 x := v.Args[0] 36962 l := v.Args[1] 36963 if l.Op != OpAMD64MOVLload { 36964 break 36965 } 36966 off := l.AuxInt 36967 sym := l.Aux 36968 _ = l.Args[1] 36969 ptr := l.Args[0] 36970 mem := l.Args[1] 36971 if !(canMergeLoad(v, l, x) && clobber(l)) { 36972 break 36973 } 36974 v.reset(OpAMD64XORLmem) 36975 v.AuxInt = off 36976 v.Aux = sym 36977 v.AddArg(x) 36978 v.AddArg(ptr) 36979 v.AddArg(mem) 36980 return true 36981 } 36982 return false 36983 } 36984 func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { 36985 // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) 36986 // cond: canMergeLoad(v, l, x) && clobber(l) 36987 // result: (XORLmem x [off] {sym} ptr mem) 36988 for { 36989 _ = v.Args[1] 36990 l := v.Args[0] 36991 if l.Op != OpAMD64MOVLload { 36992 break 36993 } 36994 off := l.AuxInt 36995 sym := l.Aux 36996 _ = l.Args[1] 36997 ptr := l.Args[0] 36998 mem := l.Args[1] 36999 x := v.Args[1] 37000 if !(canMergeLoad(v, l, x) && clobber(l)) { 37001 break 37002 } 37003 v.reset(OpAMD64XORLmem) 37004 v.AuxInt = off 37005 v.Aux = sym 37006 v.AddArg(x) 37007 v.AddArg(ptr) 37008 v.AddArg(mem) 37009 return true 37010 } 37011 return false 37012 } 37013 func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { 37014 // match: (XORLconst [1] (SETNE x)) 37015 // cond: 37016 // result: (SETEQ x) 37017 for { 37018 if v.AuxInt != 1 { 37019 break 37020 } 37021 v_0 := v.Args[0] 37022 if v_0.Op != OpAMD64SETNE { 37023 break 37024 } 37025 x := v_0.Args[0] 37026 v.reset(OpAMD64SETEQ) 37027 v.AddArg(x) 37028 return true 37029 } 37030 // match: (XORLconst [1] (SETEQ x)) 37031 // cond: 37032 // result: (SETNE x) 37033 for { 37034 if v.AuxInt != 1 { 37035 break 37036 } 37037 v_0 := v.Args[0] 37038 if v_0.Op != OpAMD64SETEQ { 37039 break 37040 } 37041 x := v_0.Args[0] 37042 v.reset(OpAMD64SETNE) 37043 v.AddArg(x) 37044 return true 37045 } 37046 // match: (XORLconst [1] (SETL x)) 37047 // cond: 37048 // result: (SETGE x) 37049 for { 37050 if v.AuxInt != 1 { 37051 break 37052 } 37053 v_0 := v.Args[0] 37054 if v_0.Op != OpAMD64SETL { 37055 break 37056 } 37057 x := v_0.Args[0] 37058 v.reset(OpAMD64SETGE) 37059 v.AddArg(x) 37060 return true 37061 } 37062 // match: (XORLconst [1] (SETGE x)) 37063 // cond: 37064 // result: (SETL x) 37065 for { 37066 if v.AuxInt != 1 { 37067 break 37068 } 37069 v_0 := v.Args[0] 37070 if v_0.Op != OpAMD64SETGE { 37071 break 37072 } 37073 x := v_0.Args[0] 37074 v.reset(OpAMD64SETL) 37075 v.AddArg(x) 37076 return true 37077 } 37078 // match: (XORLconst [1] (SETLE x)) 37079 // cond: 37080 // result: (SETG x) 37081 for { 37082 if v.AuxInt != 1 { 37083 break 37084 } 37085 v_0 := v.Args[0] 37086 if v_0.Op != OpAMD64SETLE { 37087 break 37088 } 37089 x := v_0.Args[0] 37090 v.reset(OpAMD64SETG) 37091 v.AddArg(x) 37092 return true 37093 } 37094 // match: (XORLconst [1] (SETG x)) 37095 // cond: 37096 // result: (SETLE x) 37097 for { 37098 if v.AuxInt != 1 { 37099 break 37100 } 37101 v_0 := v.Args[0] 37102 if v_0.Op != OpAMD64SETG { 37103 break 37104 } 37105 x := v_0.Args[0] 37106 v.reset(OpAMD64SETLE) 37107 v.AddArg(x) 37108 return true 37109 } 37110 // match: (XORLconst [1] (SETB x)) 37111 // cond: 37112 // result: (SETAE x) 37113 for { 37114 if v.AuxInt != 1 { 37115 break 37116 } 37117 v_0 := v.Args[0] 37118 if 
v_0.Op != OpAMD64SETB { 37119 break 37120 } 37121 x := v_0.Args[0] 37122 v.reset(OpAMD64SETAE) 37123 v.AddArg(x) 37124 return true 37125 } 37126 // match: (XORLconst [1] (SETAE x)) 37127 // cond: 37128 // result: (SETB x) 37129 for { 37130 if v.AuxInt != 1 { 37131 break 37132 } 37133 v_0 := v.Args[0] 37134 if v_0.Op != OpAMD64SETAE { 37135 break 37136 } 37137 x := v_0.Args[0] 37138 v.reset(OpAMD64SETB) 37139 v.AddArg(x) 37140 return true 37141 } 37142 // match: (XORLconst [1] (SETBE x)) 37143 // cond: 37144 // result: (SETA x) 37145 for { 37146 if v.AuxInt != 1 { 37147 break 37148 } 37149 v_0 := v.Args[0] 37150 if v_0.Op != OpAMD64SETBE { 37151 break 37152 } 37153 x := v_0.Args[0] 37154 v.reset(OpAMD64SETA) 37155 v.AddArg(x) 37156 return true 37157 } 37158 // match: (XORLconst [1] (SETA x)) 37159 // cond: 37160 // result: (SETBE x) 37161 for { 37162 if v.AuxInt != 1 { 37163 break 37164 } 37165 v_0 := v.Args[0] 37166 if v_0.Op != OpAMD64SETA { 37167 break 37168 } 37169 x := v_0.Args[0] 37170 v.reset(OpAMD64SETBE) 37171 v.AddArg(x) 37172 return true 37173 } 37174 return false 37175 } 37176 func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { 37177 // match: (XORLconst [c] (XORLconst [d] x)) 37178 // cond: 37179 // result: (XORLconst [c ^ d] x) 37180 for { 37181 c := v.AuxInt 37182 v_0 := v.Args[0] 37183 if v_0.Op != OpAMD64XORLconst { 37184 break 37185 } 37186 d := v_0.AuxInt 37187 x := v_0.Args[0] 37188 v.reset(OpAMD64XORLconst) 37189 v.AuxInt = c ^ d 37190 v.AddArg(x) 37191 return true 37192 } 37193 // match: (XORLconst [c] x) 37194 // cond: int32(c)==0 37195 // result: x 37196 for { 37197 c := v.AuxInt 37198 x := v.Args[0] 37199 if !(int32(c) == 0) { 37200 break 37201 } 37202 v.reset(OpCopy) 37203 v.Type = x.Type 37204 v.AddArg(x) 37205 return true 37206 } 37207 // match: (XORLconst [c] (MOVLconst [d])) 37208 // cond: 37209 // result: (MOVLconst [c^d]) 37210 for { 37211 c := v.AuxInt 37212 v_0 := v.Args[0] 37213 if v_0.Op != OpAMD64MOVLconst { 37214 break 37215 } 37216 d := v_0.AuxInt 37217 v.reset(OpAMD64MOVLconst) 37218 v.AuxInt = c ^ d 37219 return true 37220 } 37221 return false 37222 } 37223 func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { 37224 // match: (XORQ x (MOVQconst [c])) 37225 // cond: is32Bit(c) 37226 // result: (XORQconst [c] x) 37227 for { 37228 _ = v.Args[1] 37229 x := v.Args[0] 37230 v_1 := v.Args[1] 37231 if v_1.Op != OpAMD64MOVQconst { 37232 break 37233 } 37234 c := v_1.AuxInt 37235 if !(is32Bit(c)) { 37236 break 37237 } 37238 v.reset(OpAMD64XORQconst) 37239 v.AuxInt = c 37240 v.AddArg(x) 37241 return true 37242 } 37243 // match: (XORQ (MOVQconst [c]) x) 37244 // cond: is32Bit(c) 37245 // result: (XORQconst [c] x) 37246 for { 37247 _ = v.Args[1] 37248 v_0 := v.Args[0] 37249 if v_0.Op != OpAMD64MOVQconst { 37250 break 37251 } 37252 c := v_0.AuxInt 37253 x := v.Args[1] 37254 if !(is32Bit(c)) { 37255 break 37256 } 37257 v.reset(OpAMD64XORQconst) 37258 v.AuxInt = c 37259 v.AddArg(x) 37260 return true 37261 } 37262 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) 37263 // cond: d==64-c 37264 // result: (ROLQconst x [c]) 37265 for { 37266 _ = v.Args[1] 37267 v_0 := v.Args[0] 37268 if v_0.Op != OpAMD64SHLQconst { 37269 break 37270 } 37271 c := v_0.AuxInt 37272 x := v_0.Args[0] 37273 v_1 := v.Args[1] 37274 if v_1.Op != OpAMD64SHRQconst { 37275 break 37276 } 37277 d := v_1.AuxInt 37278 if x != v_1.Args[0] { 37279 break 37280 } 37281 if !(d == 64-c) { 37282 break 37283 } 37284 v.reset(OpAMD64ROLQconst) 37285 v.AuxInt = c 37286 v.AddArg(x) 37287 return true 37288 
} 37289 // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c])) 37290 // cond: d==64-c 37291 // result: (ROLQconst x [c]) 37292 for { 37293 _ = v.Args[1] 37294 v_0 := v.Args[0] 37295 if v_0.Op != OpAMD64SHRQconst { 37296 break 37297 } 37298 d := v_0.AuxInt 37299 x := v_0.Args[0] 37300 v_1 := v.Args[1] 37301 if v_1.Op != OpAMD64SHLQconst { 37302 break 37303 } 37304 c := v_1.AuxInt 37305 if x != v_1.Args[0] { 37306 break 37307 } 37308 if !(d == 64-c) { 37309 break 37310 } 37311 v.reset(OpAMD64ROLQconst) 37312 v.AuxInt = c 37313 v.AddArg(x) 37314 return true 37315 } 37316 // match: (XORQ x x) 37317 // cond: 37318 // result: (MOVQconst [0]) 37319 for { 37320 _ = v.Args[1] 37321 x := v.Args[0] 37322 if x != v.Args[1] { 37323 break 37324 } 37325 v.reset(OpAMD64MOVQconst) 37326 v.AuxInt = 0 37327 return true 37328 } 37329 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) 37330 // cond: canMergeLoad(v, l, x) && clobber(l) 37331 // result: (XORQmem x [off] {sym} ptr mem) 37332 for { 37333 _ = v.Args[1] 37334 x := v.Args[0] 37335 l := v.Args[1] 37336 if l.Op != OpAMD64MOVQload { 37337 break 37338 } 37339 off := l.AuxInt 37340 sym := l.Aux 37341 _ = l.Args[1] 37342 ptr := l.Args[0] 37343 mem := l.Args[1] 37344 if !(canMergeLoad(v, l, x) && clobber(l)) { 37345 break 37346 } 37347 v.reset(OpAMD64XORQmem) 37348 v.AuxInt = off 37349 v.Aux = sym 37350 v.AddArg(x) 37351 v.AddArg(ptr) 37352 v.AddArg(mem) 37353 return true 37354 } 37355 // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x) 37356 // cond: canMergeLoad(v, l, x) && clobber(l) 37357 // result: (XORQmem x [off] {sym} ptr mem) 37358 for { 37359 _ = v.Args[1] 37360 l := v.Args[0] 37361 if l.Op != OpAMD64MOVQload { 37362 break 37363 } 37364 off := l.AuxInt 37365 sym := l.Aux 37366 _ = l.Args[1] 37367 ptr := l.Args[0] 37368 mem := l.Args[1] 37369 x := v.Args[1] 37370 if !(canMergeLoad(v, l, x) && clobber(l)) { 37371 break 37372 } 37373 v.reset(OpAMD64XORQmem) 37374 v.AuxInt = off 37375 v.Aux = sym 37376 v.AddArg(x) 37377 v.AddArg(ptr) 37378 v.AddArg(mem) 37379 return true 37380 } 37381 return false 37382 } 37383 func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { 37384 // match: (XORQconst [c] (XORQconst [d] x)) 37385 // cond: 37386 // result: (XORQconst [c ^ d] x) 37387 for { 37388 c := v.AuxInt 37389 v_0 := v.Args[0] 37390 if v_0.Op != OpAMD64XORQconst { 37391 break 37392 } 37393 d := v_0.AuxInt 37394 x := v_0.Args[0] 37395 v.reset(OpAMD64XORQconst) 37396 v.AuxInt = c ^ d 37397 v.AddArg(x) 37398 return true 37399 } 37400 // match: (XORQconst [0] x) 37401 // cond: 37402 // result: x 37403 for { 37404 if v.AuxInt != 0 { 37405 break 37406 } 37407 x := v.Args[0] 37408 v.reset(OpCopy) 37409 v.Type = x.Type 37410 v.AddArg(x) 37411 return true 37412 } 37413 // match: (XORQconst [c] (MOVQconst [d])) 37414 // cond: 37415 // result: (MOVQconst [c^d]) 37416 for { 37417 c := v.AuxInt 37418 v_0 := v.Args[0] 37419 if v_0.Op != OpAMD64MOVQconst { 37420 break 37421 } 37422 d := v_0.AuxInt 37423 v.reset(OpAMD64MOVQconst) 37424 v.AuxInt = c ^ d 37425 return true 37426 } 37427 return false 37428 } 37429 func rewriteValueAMD64_OpAdd16_0(v *Value) bool { 37430 // match: (Add16 x y) 37431 // cond: 37432 // result: (ADDL x y) 37433 for { 37434 _ = v.Args[1] 37435 x := v.Args[0] 37436 y := v.Args[1] 37437 v.reset(OpAMD64ADDL) 37438 v.AddArg(x) 37439 v.AddArg(y) 37440 return true 37441 } 37442 } 37443 func rewriteValueAMD64_OpAdd32_0(v *Value) bool { 37444 // match: (Add32 x y) 37445 // cond: 37446 // result: (ADDL x y) 37447 for { 37448 _ = v.Args[1] 37449 x := 
v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
	// match: (Add32F x y)
	// cond:
	// result: (ADDSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
	// match: (Add64 x y)
	// cond:
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
	// match: (Add64F x y)
	// cond:
	// result: (ADDSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
	// match: (Add8 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AddPtr x y)
	// cond: config.PtrSize == 8
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (AddPtr x y)
	// cond: config.PtrSize == 4
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
	// match: (And16 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
	// match: (And32 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
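// The rewriteValueAMD64_Op* functions in this stretch are the lowering
// half of the file: each maps one machine-independent SSA op onto a
// single amd64 instruction. All of the 8-, 16- and 32-bit integer ANDs
// collapse onto the same 32-bit ANDL because sub-word integers are held
// in full-width registers, where ANDL computes the same low bits. Each
// function is generated from a one-line rule in gen/AMD64.rules of
// roughly this shape (paraphrased, not quoted verbatim):
//
//	(And64 x y) -> (ANDQ x y)
//	(And32 x y) -> (ANDL x y)
//	(And16 x y) -> (ANDL x y)
//	(And8  x y) -> (ANDL x y)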
func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
	// match: (And16 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
	// match: (And32 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
	// match: (And64 x y)
	// cond:
	// result: (ANDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
	// match: (And8 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAndB_0(v *Value) bool {
	// match: (AndB x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd32 ptr val mem)
	// cond:
	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst32)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd64 ptr val mem)
	// cond:
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst64)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
	// match: (AtomicAnd8 ptr val mem)
	// cond:
	// result: (ANDBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ANDBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
	// match: (AtomicExchange32 ptr val mem)
	// cond:
	// result: (XCHGL val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGL)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
	// match: (AtomicExchange64 ptr val mem)
	// cond:
	// result: (XCHGQ val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGQ)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
	// match: (AtomicLoad32 ptr mem)
	// cond:
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
	// match: (AtomicLoad64 ptr mem)
	// cond:
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 8
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 4
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
	// match: (AtomicOr8 ptr val mem)
	// cond:
	// result: (ORBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ORBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore32 ptr val mem)
	// cond:
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
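// The atomic exchange and store rules all use XCHG: an XCHG with a
// memory operand is implicitly locked on x86, so no LOCK prefix is
// needed, and it gives the sequentially consistent store that a plain
// MOV would not. The AtomicStore forms discard the exchanged value by
// selecting only the memory result (Select1) of the tuple.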
func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore64 ptr val mem)
	// cond:
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 8
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 4
	// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
	// match: (Avg64u x y)
	// cond:
	// result: (AVGQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64AVGQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen32 x)
	// cond:
	// result: (BitLen64 (MOVLQZX <typ.UInt64> x))
	for {
		x := v.Args[0]
		v.reset(OpBitLen64)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen64 <t> x)
	// cond:
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = 1
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = -1
		v0.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v5.AddArg(x)
		v4.AddArg(v5)
		v0.AddArg(v4)
		v.AddArg(v0)
		return true
	}
}
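// BitLen64 is BSRQ plus one: BSRQ leaves its destination undefined and
// sets ZF when the input is zero, so the CMOVQEQ substitutes -1 in that
// case and the ADDQconst [1] then yields the required result of 0.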
func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
	// match: (Bswap32 x)
	// cond:
	// result: (BSWAPL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
	// match: (Bswap64 x)
	// cond:
	// result: (BSWAPQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
	// match: (ClosureCall [argwid] entry closure mem)
	// cond:
	// result: (CALLclosure [argwid] entry closure mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[2]
		entry := v.Args[0]
		closure := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64CALLclosure)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(closure)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpCom16_0(v *Value) bool {
	// match: (Com16 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom32_0(v *Value) bool {
	// match: (Com32 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom64_0(v *Value) bool {
	// match: (Com64 x)
	// cond:
	// result: (NOTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom8_0(v *Value) bool {
	// match: (Com8 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpConst16_0(v *Value) bool {
	// match: (Const16 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32_0(v *Value) bool {
	// match: (Const32 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32F_0(v *Value) bool {
	// match: (Const32F [val])
	// cond:
	// result: (MOVSSconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSSconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64_0(v *Value) bool {
	// match: (Const64 [val])
	// cond:
	// result: (MOVQconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64F_0(v *Value) bool {
	// match: (Const64F [val])
	// cond:
	// result: (MOVSDconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSDconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst8_0(v *Value) bool {
	// match: (Const8 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
	// match: (ConstBool [b])
	// cond:
	// result: (MOVLconst [b])
	for {
		b := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = b
		return true
	}
}
func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ConstNil)
	// cond: config.PtrSize == 8
	// result: (MOVQconst [0])
	for {
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ConstNil)
	// cond: config.PtrSize == 4
	// result: (MOVLconst [0])
	for {
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpConvert_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 8
	// result: (MOVQconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 4
	// result: (MOVLconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz32 x)
	// cond:
	// result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = 1 << 32
		v1.AddArg(v2)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
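// BSFQ, like BSRQ, is undefined for a zero input. Ctz32 sidesteps the
// case by ORing in the constant 1<<32, guaranteeing a set bit at
// position 32; Ctz64 instead uses CMOVQEQ to substitute 64 when the
// flags say the input was zero.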
func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool {
	// match: (Cvt32Fto32 x)
	// cond:
	// result: (CVTTSS2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool {
	// match: (Cvt32Fto64 x)
	// cond:
	// result: (CVTTSS2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool {
	// match: (Cvt32Fto64F x)
	// cond:
	// result: (CVTSS2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSS2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool {
	// match: (Cvt32to32F x)
	// cond:
	// result: (CVTSL2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool {
	// match: (Cvt32to64F x)
	// cond:
	// result: (CVTSL2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool {
	// match: (Cvt64Fto32 x)
	// cond:
	// result: (CVTTSD2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool {
	// match: (Cvt64Fto32F x)
	// cond:
	// result: (CVTSD2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSD2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool {
	// match: (Cvt64Fto64 x)
	// cond:
	// result: (CVTTSD2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool {
	// match: (Cvt64to32F x)
	// cond:
	// result: (CVTSQ2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool {
	// match: (Cvt64to64F x)
	// cond:
	// result: (CVTSQ2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SD)
		v.AddArg(x)
		return true
	}
}
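// The CVTT* opcodes above are the truncating (round-toward-zero)
// float-to-integer conversions, matching the semantics of Go's
// conversions; the integer-to-float and float-to-float directions use
// the rounding CVTS* forms.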
func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
	// match: (Div128u xhi xlo y)
	// cond:
	// result: (DIVQU2 xhi xlo y)
	for {
		_ = v.Args[2]
		xhi := v.Args[0]
		xlo := v.Args[1]
		y := v.Args[2]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16 x y)
	// cond:
	// result: (Select0 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32 x y)
	// cond:
	// result: (Select0 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64 x y)
	// cond:
	// result: (Select0 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8 x y)
	// cond:
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
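// There is no dedicated 8-bit divide lowering: Div8 above sign-extends
// both operands to 16 bits and reuses the DIVW path, and Div8u below
// does the same with zero extension and DIVWU. Select0 picks the
// quotient half of the tuple the divide produces.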
func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8u x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
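// Floating-point equality cannot use the plain SETEQ: UCOMISS/UCOMISD
// report an unordered (NaN) comparison through the parity flag, and the
// SETEQF pseudo-op folds that flag into the result so that x == x is
// false when x is NaN.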
func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
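// Signed comparisons map to the G/GE/L/LE condition codes, while the
// unsigned variants (Geq16U and friends) map to A/AE/B/BE, which read
// the carry flag rather than the sign and overflow flags.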
func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG_0(v *Value) bool {
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8U x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
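// The Hmul* ops return the high half of the full double-width product;
// HMULQ and HMULQU correspond to the one-operand IMUL/MUL forms that
// leave the upper 64 bits of the 128-bit product in DX.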
func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool {
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[1]
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (IsInBounds idx len)
	// cond:
	// result: (SETB (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (IsSliceInBounds idx len)
	// cond:
	// result: (SETBE (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
}
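// The bounds checks compare with the unsigned conditions B/BE even
// though indexes are signed: a negative index, viewed as unsigned, is
// enormous and therefore fails idx < len just as an out-of-range
// positive index does.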
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
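// The floating-point Leq rules above, and the Less rules below, swap
// the operands and test with the GE/G-flavoured conditions (x <= y
// becomes y >= x): with UCOMISS and UCOMISD an unordered result then
// naturally comes out false, which is the required answer for any
// comparison involving NaN.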
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
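// All of the Lsh rules guard against oversized shift counts, which Go
// defines to produce 0 but which x86 shifts reduce modulo the operand
// width. The CMP*const against the width sets the carry flag exactly
// when the count is in range, so SBB*carrymask materializes an all-ones
// mask for in-range counts and a zero mask otherwise; ANDing it with
// the SHL result zeroes oversized shifts. The bound for 16-bit (and
// 8-bit) shifts is 32, not 16, because the shift is performed in a
// 32-bit register and counts in 16..31 already produce the correct
// low-order bits.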
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
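// Note on the Lsh rules above (a reading aid, not generated): Go requires a
// left shift by a count >= the operand width to produce 0, but x86 shift
// instructions mask the count instead. The rules therefore AND the hardware
// shift with a mask derived from the carry flag: CMPxconst y [w] sets carry
// exactly when y < w (unsigned below), and SBBxcarrymask turns that into
// all ones (keep the shifted value) or zero (force the result to 0).
// 8-, 16- and 32-bit left shifts all use SHLL and compare against 32, since
// garbage shifted out the top of a 32-bit register is harmless; 64-bit
// shifts use the Q forms and compare against 64. Roughly:
//
//	// result = (x << y) & (y < w ? ^0 : 0)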
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16 x y)
	// cond:
	// result: (Select1 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32 x y)
	// cond:
	// result: (Select1 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64 x y)
	// cond:
	// result: (Select1 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
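// The Mod rules above piggyback on division: the AMD64 DIV instructions
// produce quotient and remainder together, so the DIVx value is given a
// tuple type and Select1 extracts the remainder (Select0 would be the
// quotient). 8-bit operands are first widened to 16 bits, sign- or
// zero-extended to match the signedness of the operation. Roughly, for
//
//	r := a % b // int16
//
// the rewrite yields Select1(DIVW a b).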
func rewriteValueAMD64_OpMove_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [0] _ _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[2]
		mem := v.Args[2]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// cond:
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// cond:
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// cond:
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// cond:
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond:
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [3] dst src mem)
	// cond:
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// cond:
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// cond:
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// cond:
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
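// A long rule list for one op is split by the generator into chunked helper
// functions (_0, _10, ...); a chunk that matches nothing returns false so
// the next chunk can be tried. rewriteValueAMD64_OpMove_10 below continues
// the Move rules started in rewriteValueAMD64_OpMove_0 above.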
func rewriteValueAMD64_OpMove_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [s] dst src mem)
	// cond: s > 8 && s < 16
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 8 && s < 16) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
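// Taken together, the Move rules choose a copy strategy by size: exact small
// sizes (0, 1, 2, 4, 8, 16 bytes) become single load/store pairs; odd small
// sizes use two possibly overlapping pairs (Move [7], for instance, copies
// bytes [3,7) and then [0,4), writing byte 3 twice rather than branching);
// sizes with a non-multiple-of-16 tail copy the tail first and recurse via
// OffPtr; mid-size multiples of 16 jump into the Duff's-device copier, where
// the AuxInt 14*(64-s/16) selects the entry point into the 64-block routine
// (the factor 14 matching the code size of one 16-byte block); and anything
// larger, or any multiple of 8 when Duff's device is disabled, falls back to
// REP MOVSQ with an explicit quadword count.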
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
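// Float negation cannot use an integer NEG: it must flip only the sign bit,
// preserving NaN payloads and signed zeros. Neg32F and Neg64F therefore XOR
// the value with a constant whose only set bit is the sign bit;
// f2i(math.Copysign(0, -1)) is the bit pattern of -0.0 reinterpreted as an
// integer AuxInt. Roughly:
//
//	// -x == x ^ signbit, e.g. for float64: x ^ 0x8000000000000000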
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
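// Pointer-typed ops from here on dispatch on config.PtrSize: pointers are
// compared with CMPQ on ordinary 64-bit targets and with CMPL when the port
// uses 4-byte pointers (e.g. amd64p32). Each rule guards on the size, so
// exactly one variant fires.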
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
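// OffPtr has three variants above: when the offset fits in a sign-extended
// 32-bit immediate, the pointer arithmetic is a single ADDQconst; otherwise
// the offset is materialized with MOVQconst and added with ADDQ, since
// AMD64 ALU immediates are at most 32 bits. With 4-byte pointers ADDLconst
// always suffices, as the offset cannot exceed the immediate range.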
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
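// The unsigned right-shift rules that follow use the true-width SHRB/SHRW
// (and SHRL/SHRQ) forms: a narrow value may sit in a wider register with
// junk above it, and a wider shift would pull that junk down into the
// result. Left shifts could share SHLL because junk only moves up and out.
// The masking against the operand width is the same SBBcarrymask pattern
// used for the left shifts, with the threshold now matching the true width
// (8, 16, 32 or 64).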
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
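// Signed right shifts, next, cannot mask the result to zero: Go specifies
// that shifting right by >= width fills with the sign bit, yielding 0 or
// -1. The rules below instead saturate the count: NOT(SBBcarrymask(CMP y
// [w])) is 0 while y < w and all ones otherwise, so ORing it into y leaves
// small counts alone and forces large ones to all ones. The hardware then
// masks that huge count, but the effective shift is still at least width-1,
// which produces exactly the required sign fill.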
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond:
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond:
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x16 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x32 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x64 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x8 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x16 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x32 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x64 <t> x y)
	// cond:
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x8 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// cond:
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// cond:
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
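// The AddTupleFirst pseudo-ops appear when atomic adds are lowered: the
// locked XADD instruction returns the value previously in memory, so the
// new value must be recomputed. Select0 of an AddTupleFirst re-adds the
// addend to the tuple's first (old-value) slot with ADDL/ADDQ, while
// Select1 simply forwards the selection to the underlying tuple's memory
// result.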
func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool {
	// match: (SignExt16to32 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool {
	// match: (SignExt16to64 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool {
	// match: (SignExt32to64 x)
	// cond:
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool {
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool {
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool {
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSlicemask_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Slicemask <t> x)
	// cond:
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64SARQconst)
		v.AuxInt = 63
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSqrt_0(v *Value) bool {
	// match: (Sqrt x)
	// cond:
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
	// match: (StaticCall [argwid] {target} mem)
	// cond:
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
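// Store, below, selects a machine store purely from the type: the two float
// cases are checked first, so that an 8-byte float becomes MOVSDstore
// rather than MOVQstore; the remaining rules fall through by size (8, 4, 2,
// 1 bytes). Rule order matters here, since a 64-bit float satisfies the
// size-8 condition as well.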
func rewriteValueAMD64_OpSqrt_0(v *Value) bool {
	// match: (Sqrt x)
	// cond:
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
	// match: (StaticCall [argwid] {target} mem)
	// cond:
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpStore_0(v *Value) bool {
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
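// Note (editorial, illustrative): the generic Store op carries the stored
// type in v.Aux, and the rules above dispatch on nothing but its size plus
// floatness to pick the machine store. The same decision as a sketch, where
// storeOp is a hypothetical helper and not part of this package:
//
//	func storeOp(size int64, isFloat bool) Op {
//		switch {
//		case size == 8 && isFloat:
//			return OpAMD64MOVSDstore
//		case size == 4 && isFloat:
//			return OpAMD64MOVSSstore
//		case size == 8:
//			return OpAMD64MOVQstore
//		case size == 4:
//			return OpAMD64MOVLstore
//		case size == 2:
//			return OpAMD64MOVWstore
//		default: // size == 1
//			return OpAMD64MOVBstore
//		}
//	}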
func rewriteValueAMD64_OpSub16_0(v *Value) bool {
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32_0(v *Value) bool {
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32F_0(v *Value) bool {
	// match: (Sub32F x y)
	// cond:
	// result: (SUBSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64_0(v *Value) bool {
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64F_0(v *Value) bool {
	// match: (Sub64F x y)
	// cond:
	// result: (SUBSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub8_0(v *Value) bool {
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SubPtr x y)
	// cond: config.PtrSize == 8
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SubPtr x y)
	// cond: config.PtrSize == 4
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool {
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool {
	// match: (Trunc32to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool {
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool {
	// match: (Trunc64to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool {
	// match: (Trunc64to32 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool {
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
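// Note (editorial, illustrative): every integer truncation above lowers to
// OpCopy because AMD64 keeps narrow integers in full-width registers; the
// truncated value is simply the low bits of the same register, so the
// conversion itself emits no instruction. Uses that need the upper bits to
// be defined go through the SignExt/ZeroExt rules instead, which is where a
// MOVBQSX or MOVBQZX reappears.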
func rewriteValueAMD64_OpXor16_0(v *Value) bool {
	// match: (Xor16 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor32_0(v *Value) bool {
	// match: (Xor32 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor64_0(v *Value) bool {
	// match: (Xor64 x y)
	// cond:
	// result: (XORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor8_0(v *Value) bool {
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpZero_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Zero [0] _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[1]
		mem := v.Args[1]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// cond:
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// cond:
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// cond:
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// cond:
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// cond:
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// cond:
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%8 != 0 && s > 8) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
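// Note (editorial, illustrative): the small Zero sizes above take one store
// for lengths 1, 2, 4 and 8 and two overlapping stores for 3, 5, 6 and 7;
// makeValAndOff packs the stored value and the byte offset into a single
// AuxInt. Zero [7], for instance, becomes a 4-byte zero store at offset 3
// (bytes 3-6) on top of a 4-byte zero store at offset 0 (bytes 0-3),
// clearing all 7 bytes with no 1- or 2-byte tail stores.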
func rewriteValueAMD64_OpZero_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Zero [16] destptr mem)
	// cond:
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond:
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond:
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice
	// result: (Zero [s-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - 8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = s
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !((s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = 0
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
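// Note (editorial, illustrative): taken together, the Zero rules form a size
// ladder: single stores up to 8 bytes, unrolled MOVQstoreconst chains for 16,
// 24 and 32, an 8-byte peel when the length is not a multiple of 16, DUFFZERO
// (a computed jump into a pregenerated run of 16-byte stores, i.e. Duff's
// device, fed by a zeroed MOVOconst register) for multiples of 16 up to 1024,
// and REP STOSQ beyond that or when the Duff device is disabled. Roughly:
//
//	switch {
//	case s <= 32: // unrolled stores
//	case s%16 != 0: // store 8 bytes, recurse on s-8
//	case s <= 1024: // DUFFZERO, entry point selected by s
//	default: // REP STOSQ, count s/8
//	}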
func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool {
	// match: (ZeroExt16to32 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool {
	// match: (ZeroExt16to64 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool {
	// match: (ZeroExt32to64 x)
	// cond:
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool {
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool {
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool {
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteBlockAMD64(b *Block) bool {
	config := b.Func.Config
	_ = config
	fe := b.Func.fe
	_ = fe
	typ := &config.Types
	_ = typ
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 :=
b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 43141 v0.AddArg(x) 43142 v0.AddArg(y) 43143 b.SetControl(v0) 43144 return true 43145 } 43146 // match: (EQ (TESTL y (SHLL (MOVLconst [1]) x))) 43147 // cond: !config.nacl 43148 // result: (UGE (BTL x y)) 43149 for { 43150 v := b.Control 43151 if v.Op != OpAMD64TESTL { 43152 break 43153 } 43154 _ = v.Args[1] 43155 y := v.Args[0] 43156 v_1 := v.Args[1] 43157 if v_1.Op != OpAMD64SHLL { 43158 break 43159 } 43160 _ = v_1.Args[1] 43161 v_1_0 := v_1.Args[0] 43162 if v_1_0.Op != OpAMD64MOVLconst { 43163 break 43164 } 43165 if v_1_0.AuxInt != 1 { 43166 break 43167 } 43168 x := v_1.Args[1] 43169 if !(!config.nacl) { 43170 break 43171 } 43172 b.Kind = BlockAMD64UGE 43173 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 43174 v0.AddArg(x) 43175 v0.AddArg(y) 43176 b.SetControl(v0) 43177 return true 43178 } 43179 // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 43180 // cond: !config.nacl 43181 // result: (UGE (BTQ x y)) 43182 for { 43183 v := b.Control 43184 if v.Op != OpAMD64TESTQ { 43185 break 43186 } 43187 _ = v.Args[1] 43188 v_0 := v.Args[0] 43189 if v_0.Op != OpAMD64SHLQ { 43190 break 43191 } 43192 _ = v_0.Args[1] 43193 v_0_0 := v_0.Args[0] 43194 if v_0_0.Op != OpAMD64MOVQconst { 43195 break 43196 } 43197 if v_0_0.AuxInt != 1 { 43198 break 43199 } 43200 x := v_0.Args[1] 43201 y := v.Args[1] 43202 if !(!config.nacl) { 43203 break 43204 } 43205 b.Kind = BlockAMD64UGE 43206 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 43207 v0.AddArg(x) 43208 v0.AddArg(y) 43209 b.SetControl(v0) 43210 return true 43211 } 43212 // match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 43213 // cond: !config.nacl 43214 // result: (UGE (BTQ x y)) 43215 for { 43216 v := b.Control 43217 if v.Op != OpAMD64TESTQ { 43218 break 43219 } 43220 _ = v.Args[1] 43221 y := v.Args[0] 43222 v_1 := v.Args[1] 43223 if v_1.Op != OpAMD64SHLQ { 43224 break 43225 } 43226 _ = v_1.Args[1] 43227 v_1_0 := v_1.Args[0] 43228 if v_1_0.Op != OpAMD64MOVQconst { 43229 break 43230 } 43231 if v_1_0.AuxInt != 1 { 43232 break 43233 } 43234 x := v_1.Args[1] 43235 if !(!config.nacl) { 43236 break 43237 } 43238 b.Kind = BlockAMD64UGE 43239 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 43240 v0.AddArg(x) 43241 v0.AddArg(y) 43242 b.SetControl(v0) 43243 return true 43244 } 43245 // match: (EQ (TESTLconst [c] x)) 43246 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 43247 // result: (UGE (BTLconst [log2(c)] x)) 43248 for { 43249 v := b.Control 43250 if v.Op != OpAMD64TESTLconst { 43251 break 43252 } 43253 c := v.AuxInt 43254 x := v.Args[0] 43255 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 43256 break 43257 } 43258 b.Kind = BlockAMD64UGE 43259 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 43260 v0.AuxInt = log2(c) 43261 v0.AddArg(x) 43262 b.SetControl(v0) 43263 return true 43264 } 43265 // match: (EQ (TESTQconst [c] x)) 43266 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 43267 // result: (UGE (BTQconst [log2(c)] x)) 43268 for { 43269 v := b.Control 43270 if v.Op != OpAMD64TESTQconst { 43271 break 43272 } 43273 c := v.AuxInt 43274 x := v.Args[0] 43275 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 43276 break 43277 } 43278 b.Kind = BlockAMD64UGE 43279 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 43280 v0.AuxInt = log2(c) 43281 v0.AddArg(x) 43282 b.SetControl(v0) 43283 return true 43284 } 43285 // match: (EQ (TESTQ (MOVQconst [c]) x)) 43286 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 43287 // result: (UGE (BTQconst 
[log2(c)] x)) 43288 for { 43289 v := b.Control 43290 if v.Op != OpAMD64TESTQ { 43291 break 43292 } 43293 _ = v.Args[1] 43294 v_0 := v.Args[0] 43295 if v_0.Op != OpAMD64MOVQconst { 43296 break 43297 } 43298 c := v_0.AuxInt 43299 x := v.Args[1] 43300 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 43301 break 43302 } 43303 b.Kind = BlockAMD64UGE 43304 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 43305 v0.AuxInt = log2(c) 43306 v0.AddArg(x) 43307 b.SetControl(v0) 43308 return true 43309 } 43310 // match: (EQ (TESTQ x (MOVQconst [c]))) 43311 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 43312 // result: (UGE (BTQconst [log2(c)] x)) 43313 for { 43314 v := b.Control 43315 if v.Op != OpAMD64TESTQ { 43316 break 43317 } 43318 _ = v.Args[1] 43319 x := v.Args[0] 43320 v_1 := v.Args[1] 43321 if v_1.Op != OpAMD64MOVQconst { 43322 break 43323 } 43324 c := v_1.AuxInt 43325 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 43326 break 43327 } 43328 b.Kind = BlockAMD64UGE 43329 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 43330 v0.AuxInt = log2(c) 43331 v0.AddArg(x) 43332 b.SetControl(v0) 43333 return true 43334 } 43335 // match: (EQ (InvertFlags cmp) yes no) 43336 // cond: 43337 // result: (EQ cmp yes no) 43338 for { 43339 v := b.Control 43340 if v.Op != OpAMD64InvertFlags { 43341 break 43342 } 43343 cmp := v.Args[0] 43344 b.Kind = BlockAMD64EQ 43345 b.SetControl(cmp) 43346 return true 43347 } 43348 // match: (EQ (FlagEQ) yes no) 43349 // cond: 43350 // result: (First nil yes no) 43351 for { 43352 v := b.Control 43353 if v.Op != OpAMD64FlagEQ { 43354 break 43355 } 43356 b.Kind = BlockFirst 43357 b.SetControl(nil) 43358 return true 43359 } 43360 // match: (EQ (FlagLT_ULT) yes no) 43361 // cond: 43362 // result: (First nil no yes) 43363 for { 43364 v := b.Control 43365 if v.Op != OpAMD64FlagLT_ULT { 43366 break 43367 } 43368 b.Kind = BlockFirst 43369 b.SetControl(nil) 43370 b.swapSuccessors() 43371 return true 43372 } 43373 // match: (EQ (FlagLT_UGT) yes no) 43374 // cond: 43375 // result: (First nil no yes) 43376 for { 43377 v := b.Control 43378 if v.Op != OpAMD64FlagLT_UGT { 43379 break 43380 } 43381 b.Kind = BlockFirst 43382 b.SetControl(nil) 43383 b.swapSuccessors() 43384 return true 43385 } 43386 // match: (EQ (FlagGT_ULT) yes no) 43387 // cond: 43388 // result: (First nil no yes) 43389 for { 43390 v := b.Control 43391 if v.Op != OpAMD64FlagGT_ULT { 43392 break 43393 } 43394 b.Kind = BlockFirst 43395 b.SetControl(nil) 43396 b.swapSuccessors() 43397 return true 43398 } 43399 // match: (EQ (FlagGT_UGT) yes no) 43400 // cond: 43401 // result: (First nil no yes) 43402 for { 43403 v := b.Control 43404 if v.Op != OpAMD64FlagGT_UGT { 43405 break 43406 } 43407 b.Kind = BlockFirst 43408 b.SetControl(nil) 43409 b.swapSuccessors() 43410 return true 43411 } 43412 case BlockAMD64GE: 43413 // match: (GE (InvertFlags cmp) yes no) 43414 // cond: 43415 // result: (LE cmp yes no) 43416 for { 43417 v := b.Control 43418 if v.Op != OpAMD64InvertFlags { 43419 break 43420 } 43421 cmp := v.Args[0] 43422 b.Kind = BlockAMD64LE 43423 b.SetControl(cmp) 43424 return true 43425 } 43426 // match: (GE (FlagEQ) yes no) 43427 // cond: 43428 // result: (First nil yes no) 43429 for { 43430 v := b.Control 43431 if v.Op != OpAMD64FlagEQ { 43432 break 43433 } 43434 b.Kind = BlockFirst 43435 b.SetControl(nil) 43436 return true 43437 } 43438 // match: (GE (FlagLT_ULT) yes no) 43439 // cond: 43440 // result: (First nil no yes) 43441 for { 43442 v := b.Control 43443 if v.Op != 
OpAMD64FlagLT_ULT { 43444 break 43445 } 43446 b.Kind = BlockFirst 43447 b.SetControl(nil) 43448 b.swapSuccessors() 43449 return true 43450 } 43451 // match: (GE (FlagLT_UGT) yes no) 43452 // cond: 43453 // result: (First nil no yes) 43454 for { 43455 v := b.Control 43456 if v.Op != OpAMD64FlagLT_UGT { 43457 break 43458 } 43459 b.Kind = BlockFirst 43460 b.SetControl(nil) 43461 b.swapSuccessors() 43462 return true 43463 } 43464 // match: (GE (FlagGT_ULT) yes no) 43465 // cond: 43466 // result: (First nil yes no) 43467 for { 43468 v := b.Control 43469 if v.Op != OpAMD64FlagGT_ULT { 43470 break 43471 } 43472 b.Kind = BlockFirst 43473 b.SetControl(nil) 43474 return true 43475 } 43476 // match: (GE (FlagGT_UGT) yes no) 43477 // cond: 43478 // result: (First nil yes no) 43479 for { 43480 v := b.Control 43481 if v.Op != OpAMD64FlagGT_UGT { 43482 break 43483 } 43484 b.Kind = BlockFirst 43485 b.SetControl(nil) 43486 return true 43487 } 43488 case BlockAMD64GT: 43489 // match: (GT (InvertFlags cmp) yes no) 43490 // cond: 43491 // result: (LT cmp yes no) 43492 for { 43493 v := b.Control 43494 if v.Op != OpAMD64InvertFlags { 43495 break 43496 } 43497 cmp := v.Args[0] 43498 b.Kind = BlockAMD64LT 43499 b.SetControl(cmp) 43500 return true 43501 } 43502 // match: (GT (FlagEQ) yes no) 43503 // cond: 43504 // result: (First nil no yes) 43505 for { 43506 v := b.Control 43507 if v.Op != OpAMD64FlagEQ { 43508 break 43509 } 43510 b.Kind = BlockFirst 43511 b.SetControl(nil) 43512 b.swapSuccessors() 43513 return true 43514 } 43515 // match: (GT (FlagLT_ULT) yes no) 43516 // cond: 43517 // result: (First nil no yes) 43518 for { 43519 v := b.Control 43520 if v.Op != OpAMD64FlagLT_ULT { 43521 break 43522 } 43523 b.Kind = BlockFirst 43524 b.SetControl(nil) 43525 b.swapSuccessors() 43526 return true 43527 } 43528 // match: (GT (FlagLT_UGT) yes no) 43529 // cond: 43530 // result: (First nil no yes) 43531 for { 43532 v := b.Control 43533 if v.Op != OpAMD64FlagLT_UGT { 43534 break 43535 } 43536 b.Kind = BlockFirst 43537 b.SetControl(nil) 43538 b.swapSuccessors() 43539 return true 43540 } 43541 // match: (GT (FlagGT_ULT) yes no) 43542 // cond: 43543 // result: (First nil yes no) 43544 for { 43545 v := b.Control 43546 if v.Op != OpAMD64FlagGT_ULT { 43547 break 43548 } 43549 b.Kind = BlockFirst 43550 b.SetControl(nil) 43551 return true 43552 } 43553 // match: (GT (FlagGT_UGT) yes no) 43554 // cond: 43555 // result: (First nil yes no) 43556 for { 43557 v := b.Control 43558 if v.Op != OpAMD64FlagGT_UGT { 43559 break 43560 } 43561 b.Kind = BlockFirst 43562 b.SetControl(nil) 43563 return true 43564 } 43565 case BlockIf: 43566 // match: (If (SETL cmp) yes no) 43567 // cond: 43568 // result: (LT cmp yes no) 43569 for { 43570 v := b.Control 43571 if v.Op != OpAMD64SETL { 43572 break 43573 } 43574 cmp := v.Args[0] 43575 b.Kind = BlockAMD64LT 43576 b.SetControl(cmp) 43577 return true 43578 } 43579 // match: (If (SETLE cmp) yes no) 43580 // cond: 43581 // result: (LE cmp yes no) 43582 for { 43583 v := b.Control 43584 if v.Op != OpAMD64SETLE { 43585 break 43586 } 43587 cmp := v.Args[0] 43588 b.Kind = BlockAMD64LE 43589 b.SetControl(cmp) 43590 return true 43591 } 43592 // match: (If (SETG cmp) yes no) 43593 // cond: 43594 // result: (GT cmp yes no) 43595 for { 43596 v := b.Control 43597 if v.Op != OpAMD64SETG { 43598 break 43599 } 43600 cmp := v.Args[0] 43601 b.Kind = BlockAMD64GT 43602 b.SetControl(cmp) 43603 return true 43604 } 43605 // match: (If (SETGE cmp) yes no) 43606 // cond: 43607 // result: (GE cmp yes no) 43608 for { 43609 v 
:= b.Control 43610 if v.Op != OpAMD64SETGE { 43611 break 43612 } 43613 cmp := v.Args[0] 43614 b.Kind = BlockAMD64GE 43615 b.SetControl(cmp) 43616 return true 43617 } 43618 // match: (If (SETEQ cmp) yes no) 43619 // cond: 43620 // result: (EQ cmp yes no) 43621 for { 43622 v := b.Control 43623 if v.Op != OpAMD64SETEQ { 43624 break 43625 } 43626 cmp := v.Args[0] 43627 b.Kind = BlockAMD64EQ 43628 b.SetControl(cmp) 43629 return true 43630 } 43631 // match: (If (SETNE cmp) yes no) 43632 // cond: 43633 // result: (NE cmp yes no) 43634 for { 43635 v := b.Control 43636 if v.Op != OpAMD64SETNE { 43637 break 43638 } 43639 cmp := v.Args[0] 43640 b.Kind = BlockAMD64NE 43641 b.SetControl(cmp) 43642 return true 43643 } 43644 // match: (If (SETB cmp) yes no) 43645 // cond: 43646 // result: (ULT cmp yes no) 43647 for { 43648 v := b.Control 43649 if v.Op != OpAMD64SETB { 43650 break 43651 } 43652 cmp := v.Args[0] 43653 b.Kind = BlockAMD64ULT 43654 b.SetControl(cmp) 43655 return true 43656 } 43657 // match: (If (SETBE cmp) yes no) 43658 // cond: 43659 // result: (ULE cmp yes no) 43660 for { 43661 v := b.Control 43662 if v.Op != OpAMD64SETBE { 43663 break 43664 } 43665 cmp := v.Args[0] 43666 b.Kind = BlockAMD64ULE 43667 b.SetControl(cmp) 43668 return true 43669 } 43670 // match: (If (SETA cmp) yes no) 43671 // cond: 43672 // result: (UGT cmp yes no) 43673 for { 43674 v := b.Control 43675 if v.Op != OpAMD64SETA { 43676 break 43677 } 43678 cmp := v.Args[0] 43679 b.Kind = BlockAMD64UGT 43680 b.SetControl(cmp) 43681 return true 43682 } 43683 // match: (If (SETAE cmp) yes no) 43684 // cond: 43685 // result: (UGE cmp yes no) 43686 for { 43687 v := b.Control 43688 if v.Op != OpAMD64SETAE { 43689 break 43690 } 43691 cmp := v.Args[0] 43692 b.Kind = BlockAMD64UGE 43693 b.SetControl(cmp) 43694 return true 43695 } 43696 // match: (If (SETGF cmp) yes no) 43697 // cond: 43698 // result: (UGT cmp yes no) 43699 for { 43700 v := b.Control 43701 if v.Op != OpAMD64SETGF { 43702 break 43703 } 43704 cmp := v.Args[0] 43705 b.Kind = BlockAMD64UGT 43706 b.SetControl(cmp) 43707 return true 43708 } 43709 // match: (If (SETGEF cmp) yes no) 43710 // cond: 43711 // result: (UGE cmp yes no) 43712 for { 43713 v := b.Control 43714 if v.Op != OpAMD64SETGEF { 43715 break 43716 } 43717 cmp := v.Args[0] 43718 b.Kind = BlockAMD64UGE 43719 b.SetControl(cmp) 43720 return true 43721 } 43722 // match: (If (SETEQF cmp) yes no) 43723 // cond: 43724 // result: (EQF cmp yes no) 43725 for { 43726 v := b.Control 43727 if v.Op != OpAMD64SETEQF { 43728 break 43729 } 43730 cmp := v.Args[0] 43731 b.Kind = BlockAMD64EQF 43732 b.SetControl(cmp) 43733 return true 43734 } 43735 // match: (If (SETNEF cmp) yes no) 43736 // cond: 43737 // result: (NEF cmp yes no) 43738 for { 43739 v := b.Control 43740 if v.Op != OpAMD64SETNEF { 43741 break 43742 } 43743 cmp := v.Args[0] 43744 b.Kind = BlockAMD64NEF 43745 b.SetControl(cmp) 43746 return true 43747 } 43748 // match: (If cond yes no) 43749 // cond: 43750 // result: (NE (TESTB cond cond) yes no) 43751 for { 43752 v := b.Control 43753 _ = v 43754 cond := b.Control 43755 b.Kind = BlockAMD64NE 43756 v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags) 43757 v0.AddArg(cond) 43758 v0.AddArg(cond) 43759 b.SetControl(v0) 43760 return true 43761 } 43762 case BlockAMD64LE: 43763 // match: (LE (InvertFlags cmp) yes no) 43764 // cond: 43765 // result: (GE cmp yes no) 43766 for { 43767 v := b.Control 43768 if v.Op != OpAMD64InvertFlags { 43769 break 43770 } 43771 cmp := v.Args[0] 43772 b.Kind = BlockAMD64GE 43773 
b.SetControl(cmp) 43774 return true 43775 } 43776 // match: (LE (FlagEQ) yes no) 43777 // cond: 43778 // result: (First nil yes no) 43779 for { 43780 v := b.Control 43781 if v.Op != OpAMD64FlagEQ { 43782 break 43783 } 43784 b.Kind = BlockFirst 43785 b.SetControl(nil) 43786 return true 43787 } 43788 // match: (LE (FlagLT_ULT) yes no) 43789 // cond: 43790 // result: (First nil yes no) 43791 for { 43792 v := b.Control 43793 if v.Op != OpAMD64FlagLT_ULT { 43794 break 43795 } 43796 b.Kind = BlockFirst 43797 b.SetControl(nil) 43798 return true 43799 } 43800 // match: (LE (FlagLT_UGT) yes no) 43801 // cond: 43802 // result: (First nil yes no) 43803 for { 43804 v := b.Control 43805 if v.Op != OpAMD64FlagLT_UGT { 43806 break 43807 } 43808 b.Kind = BlockFirst 43809 b.SetControl(nil) 43810 return true 43811 } 43812 // match: (LE (FlagGT_ULT) yes no) 43813 // cond: 43814 // result: (First nil no yes) 43815 for { 43816 v := b.Control 43817 if v.Op != OpAMD64FlagGT_ULT { 43818 break 43819 } 43820 b.Kind = BlockFirst 43821 b.SetControl(nil) 43822 b.swapSuccessors() 43823 return true 43824 } 43825 // match: (LE (FlagGT_UGT) yes no) 43826 // cond: 43827 // result: (First nil no yes) 43828 for { 43829 v := b.Control 43830 if v.Op != OpAMD64FlagGT_UGT { 43831 break 43832 } 43833 b.Kind = BlockFirst 43834 b.SetControl(nil) 43835 b.swapSuccessors() 43836 return true 43837 } 43838 case BlockAMD64LT: 43839 // match: (LT (InvertFlags cmp) yes no) 43840 // cond: 43841 // result: (GT cmp yes no) 43842 for { 43843 v := b.Control 43844 if v.Op != OpAMD64InvertFlags { 43845 break 43846 } 43847 cmp := v.Args[0] 43848 b.Kind = BlockAMD64GT 43849 b.SetControl(cmp) 43850 return true 43851 } 43852 // match: (LT (FlagEQ) yes no) 43853 // cond: 43854 // result: (First nil no yes) 43855 for { 43856 v := b.Control 43857 if v.Op != OpAMD64FlagEQ { 43858 break 43859 } 43860 b.Kind = BlockFirst 43861 b.SetControl(nil) 43862 b.swapSuccessors() 43863 return true 43864 } 43865 // match: (LT (FlagLT_ULT) yes no) 43866 // cond: 43867 // result: (First nil yes no) 43868 for { 43869 v := b.Control 43870 if v.Op != OpAMD64FlagLT_ULT { 43871 break 43872 } 43873 b.Kind = BlockFirst 43874 b.SetControl(nil) 43875 return true 43876 } 43877 // match: (LT (FlagLT_UGT) yes no) 43878 // cond: 43879 // result: (First nil yes no) 43880 for { 43881 v := b.Control 43882 if v.Op != OpAMD64FlagLT_UGT { 43883 break 43884 } 43885 b.Kind = BlockFirst 43886 b.SetControl(nil) 43887 return true 43888 } 43889 // match: (LT (FlagGT_ULT) yes no) 43890 // cond: 43891 // result: (First nil no yes) 43892 for { 43893 v := b.Control 43894 if v.Op != OpAMD64FlagGT_ULT { 43895 break 43896 } 43897 b.Kind = BlockFirst 43898 b.SetControl(nil) 43899 b.swapSuccessors() 43900 return true 43901 } 43902 // match: (LT (FlagGT_UGT) yes no) 43903 // cond: 43904 // result: (First nil no yes) 43905 for { 43906 v := b.Control 43907 if v.Op != OpAMD64FlagGT_UGT { 43908 break 43909 } 43910 b.Kind = BlockFirst 43911 b.SetControl(nil) 43912 b.swapSuccessors() 43913 return true 43914 } 43915 case BlockAMD64NE: 43916 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 43917 // cond: 43918 // result: (LT cmp yes no) 43919 for { 43920 v := b.Control 43921 if v.Op != OpAMD64TESTB { 43922 break 43923 } 43924 _ = v.Args[1] 43925 v_0 := v.Args[0] 43926 if v_0.Op != OpAMD64SETL { 43927 break 43928 } 43929 cmp := v_0.Args[0] 43930 v_1 := v.Args[1] 43931 if v_1.Op != OpAMD64SETL { 43932 break 43933 } 43934 if cmp != v_1.Args[0] { 43935 break 43936 } 43937 b.Kind = BlockAMD64LT 43938 
b.SetControl(cmp) 43939 return true 43940 } 43941 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 43942 // cond: 43943 // result: (LT cmp yes no) 43944 for { 43945 v := b.Control 43946 if v.Op != OpAMD64TESTB { 43947 break 43948 } 43949 _ = v.Args[1] 43950 v_0 := v.Args[0] 43951 if v_0.Op != OpAMD64SETL { 43952 break 43953 } 43954 cmp := v_0.Args[0] 43955 v_1 := v.Args[1] 43956 if v_1.Op != OpAMD64SETL { 43957 break 43958 } 43959 if cmp != v_1.Args[0] { 43960 break 43961 } 43962 b.Kind = BlockAMD64LT 43963 b.SetControl(cmp) 43964 return true 43965 } 43966 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 43967 // cond: 43968 // result: (LE cmp yes no) 43969 for { 43970 v := b.Control 43971 if v.Op != OpAMD64TESTB { 43972 break 43973 } 43974 _ = v.Args[1] 43975 v_0 := v.Args[0] 43976 if v_0.Op != OpAMD64SETLE { 43977 break 43978 } 43979 cmp := v_0.Args[0] 43980 v_1 := v.Args[1] 43981 if v_1.Op != OpAMD64SETLE { 43982 break 43983 } 43984 if cmp != v_1.Args[0] { 43985 break 43986 } 43987 b.Kind = BlockAMD64LE 43988 b.SetControl(cmp) 43989 return true 43990 } 43991 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 43992 // cond: 43993 // result: (LE cmp yes no) 43994 for { 43995 v := b.Control 43996 if v.Op != OpAMD64TESTB { 43997 break 43998 } 43999 _ = v.Args[1] 44000 v_0 := v.Args[0] 44001 if v_0.Op != OpAMD64SETLE { 44002 break 44003 } 44004 cmp := v_0.Args[0] 44005 v_1 := v.Args[1] 44006 if v_1.Op != OpAMD64SETLE { 44007 break 44008 } 44009 if cmp != v_1.Args[0] { 44010 break 44011 } 44012 b.Kind = BlockAMD64LE 44013 b.SetControl(cmp) 44014 return true 44015 } 44016 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 44017 // cond: 44018 // result: (GT cmp yes no) 44019 for { 44020 v := b.Control 44021 if v.Op != OpAMD64TESTB { 44022 break 44023 } 44024 _ = v.Args[1] 44025 v_0 := v.Args[0] 44026 if v_0.Op != OpAMD64SETG { 44027 break 44028 } 44029 cmp := v_0.Args[0] 44030 v_1 := v.Args[1] 44031 if v_1.Op != OpAMD64SETG { 44032 break 44033 } 44034 if cmp != v_1.Args[0] { 44035 break 44036 } 44037 b.Kind = BlockAMD64GT 44038 b.SetControl(cmp) 44039 return true 44040 } 44041 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 44042 // cond: 44043 // result: (GT cmp yes no) 44044 for { 44045 v := b.Control 44046 if v.Op != OpAMD64TESTB { 44047 break 44048 } 44049 _ = v.Args[1] 44050 v_0 := v.Args[0] 44051 if v_0.Op != OpAMD64SETG { 44052 break 44053 } 44054 cmp := v_0.Args[0] 44055 v_1 := v.Args[1] 44056 if v_1.Op != OpAMD64SETG { 44057 break 44058 } 44059 if cmp != v_1.Args[0] { 44060 break 44061 } 44062 b.Kind = BlockAMD64GT 44063 b.SetControl(cmp) 44064 return true 44065 } 44066 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 44067 // cond: 44068 // result: (GE cmp yes no) 44069 for { 44070 v := b.Control 44071 if v.Op != OpAMD64TESTB { 44072 break 44073 } 44074 _ = v.Args[1] 44075 v_0 := v.Args[0] 44076 if v_0.Op != OpAMD64SETGE { 44077 break 44078 } 44079 cmp := v_0.Args[0] 44080 v_1 := v.Args[1] 44081 if v_1.Op != OpAMD64SETGE { 44082 break 44083 } 44084 if cmp != v_1.Args[0] { 44085 break 44086 } 44087 b.Kind = BlockAMD64GE 44088 b.SetControl(cmp) 44089 return true 44090 } 44091 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 44092 // cond: 44093 // result: (GE cmp yes no) 44094 for { 44095 v := b.Control 44096 if v.Op != OpAMD64TESTB { 44097 break 44098 } 44099 _ = v.Args[1] 44100 v_0 := v.Args[0] 44101 if v_0.Op != OpAMD64SETGE { 44102 break 44103 } 44104 cmp := v_0.Args[0] 44105 v_1 := v.Args[1] 44106 if v_1.Op != OpAMD64SETGE { 44107 break 44108 } 44109 if 
cmp != v_1.Args[0] { 44110 break 44111 } 44112 b.Kind = BlockAMD64GE 44113 b.SetControl(cmp) 44114 return true 44115 } 44116 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 44117 // cond: 44118 // result: (EQ cmp yes no) 44119 for { 44120 v := b.Control 44121 if v.Op != OpAMD64TESTB { 44122 break 44123 } 44124 _ = v.Args[1] 44125 v_0 := v.Args[0] 44126 if v_0.Op != OpAMD64SETEQ { 44127 break 44128 } 44129 cmp := v_0.Args[0] 44130 v_1 := v.Args[1] 44131 if v_1.Op != OpAMD64SETEQ { 44132 break 44133 } 44134 if cmp != v_1.Args[0] { 44135 break 44136 } 44137 b.Kind = BlockAMD64EQ 44138 b.SetControl(cmp) 44139 return true 44140 } 44141 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 44142 // cond: 44143 // result: (EQ cmp yes no) 44144 for { 44145 v := b.Control 44146 if v.Op != OpAMD64TESTB { 44147 break 44148 } 44149 _ = v.Args[1] 44150 v_0 := v.Args[0] 44151 if v_0.Op != OpAMD64SETEQ { 44152 break 44153 } 44154 cmp := v_0.Args[0] 44155 v_1 := v.Args[1] 44156 if v_1.Op != OpAMD64SETEQ { 44157 break 44158 } 44159 if cmp != v_1.Args[0] { 44160 break 44161 } 44162 b.Kind = BlockAMD64EQ 44163 b.SetControl(cmp) 44164 return true 44165 } 44166 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 44167 // cond: 44168 // result: (NE cmp yes no) 44169 for { 44170 v := b.Control 44171 if v.Op != OpAMD64TESTB { 44172 break 44173 } 44174 _ = v.Args[1] 44175 v_0 := v.Args[0] 44176 if v_0.Op != OpAMD64SETNE { 44177 break 44178 } 44179 cmp := v_0.Args[0] 44180 v_1 := v.Args[1] 44181 if v_1.Op != OpAMD64SETNE { 44182 break 44183 } 44184 if cmp != v_1.Args[0] { 44185 break 44186 } 44187 b.Kind = BlockAMD64NE 44188 b.SetControl(cmp) 44189 return true 44190 } 44191 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 44192 // cond: 44193 // result: (NE cmp yes no) 44194 for { 44195 v := b.Control 44196 if v.Op != OpAMD64TESTB { 44197 break 44198 } 44199 _ = v.Args[1] 44200 v_0 := v.Args[0] 44201 if v_0.Op != OpAMD64SETNE { 44202 break 44203 } 44204 cmp := v_0.Args[0] 44205 v_1 := v.Args[1] 44206 if v_1.Op != OpAMD64SETNE { 44207 break 44208 } 44209 if cmp != v_1.Args[0] { 44210 break 44211 } 44212 b.Kind = BlockAMD64NE 44213 b.SetControl(cmp) 44214 return true 44215 } 44216 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 44217 // cond: 44218 // result: (ULT cmp yes no) 44219 for { 44220 v := b.Control 44221 if v.Op != OpAMD64TESTB { 44222 break 44223 } 44224 _ = v.Args[1] 44225 v_0 := v.Args[0] 44226 if v_0.Op != OpAMD64SETB { 44227 break 44228 } 44229 cmp := v_0.Args[0] 44230 v_1 := v.Args[1] 44231 if v_1.Op != OpAMD64SETB { 44232 break 44233 } 44234 if cmp != v_1.Args[0] { 44235 break 44236 } 44237 b.Kind = BlockAMD64ULT 44238 b.SetControl(cmp) 44239 return true 44240 } 44241 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 44242 // cond: 44243 // result: (ULT cmp yes no) 44244 for { 44245 v := b.Control 44246 if v.Op != OpAMD64TESTB { 44247 break 44248 } 44249 _ = v.Args[1] 44250 v_0 := v.Args[0] 44251 if v_0.Op != OpAMD64SETB { 44252 break 44253 } 44254 cmp := v_0.Args[0] 44255 v_1 := v.Args[1] 44256 if v_1.Op != OpAMD64SETB { 44257 break 44258 } 44259 if cmp != v_1.Args[0] { 44260 break 44261 } 44262 b.Kind = BlockAMD64ULT 44263 b.SetControl(cmp) 44264 return true 44265 } 44266 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 44267 // cond: 44268 // result: (ULE cmp yes no) 44269 for { 44270 v := b.Control 44271 if v.Op != OpAMD64TESTB { 44272 break 44273 } 44274 _ = v.Args[1] 44275 v_0 := v.Args[0] 44276 if v_0.Op != OpAMD64SETBE { 44277 break 44278 } 44279 cmp := v_0.Args[0] 
44280 v_1 := v.Args[1] 44281 if v_1.Op != OpAMD64SETBE { 44282 break 44283 } 44284 if cmp != v_1.Args[0] { 44285 break 44286 } 44287 b.Kind = BlockAMD64ULE 44288 b.SetControl(cmp) 44289 return true 44290 } 44291 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 44292 // cond: 44293 // result: (ULE cmp yes no) 44294 for { 44295 v := b.Control 44296 if v.Op != OpAMD64TESTB { 44297 break 44298 } 44299 _ = v.Args[1] 44300 v_0 := v.Args[0] 44301 if v_0.Op != OpAMD64SETBE { 44302 break 44303 } 44304 cmp := v_0.Args[0] 44305 v_1 := v.Args[1] 44306 if v_1.Op != OpAMD64SETBE { 44307 break 44308 } 44309 if cmp != v_1.Args[0] { 44310 break 44311 } 44312 b.Kind = BlockAMD64ULE 44313 b.SetControl(cmp) 44314 return true 44315 } 44316 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 44317 // cond: 44318 // result: (UGT cmp yes no) 44319 for { 44320 v := b.Control 44321 if v.Op != OpAMD64TESTB { 44322 break 44323 } 44324 _ = v.Args[1] 44325 v_0 := v.Args[0] 44326 if v_0.Op != OpAMD64SETA { 44327 break 44328 } 44329 cmp := v_0.Args[0] 44330 v_1 := v.Args[1] 44331 if v_1.Op != OpAMD64SETA { 44332 break 44333 } 44334 if cmp != v_1.Args[0] { 44335 break 44336 } 44337 b.Kind = BlockAMD64UGT 44338 b.SetControl(cmp) 44339 return true 44340 } 44341 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 44342 // cond: 44343 // result: (UGT cmp yes no) 44344 for { 44345 v := b.Control 44346 if v.Op != OpAMD64TESTB { 44347 break 44348 } 44349 _ = v.Args[1] 44350 v_0 := v.Args[0] 44351 if v_0.Op != OpAMD64SETA { 44352 break 44353 } 44354 cmp := v_0.Args[0] 44355 v_1 := v.Args[1] 44356 if v_1.Op != OpAMD64SETA { 44357 break 44358 } 44359 if cmp != v_1.Args[0] { 44360 break 44361 } 44362 b.Kind = BlockAMD64UGT 44363 b.SetControl(cmp) 44364 return true 44365 } 44366 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 44367 // cond: 44368 // result: (UGE cmp yes no) 44369 for { 44370 v := b.Control 44371 if v.Op != OpAMD64TESTB { 44372 break 44373 } 44374 _ = v.Args[1] 44375 v_0 := v.Args[0] 44376 if v_0.Op != OpAMD64SETAE { 44377 break 44378 } 44379 cmp := v_0.Args[0] 44380 v_1 := v.Args[1] 44381 if v_1.Op != OpAMD64SETAE { 44382 break 44383 } 44384 if cmp != v_1.Args[0] { 44385 break 44386 } 44387 b.Kind = BlockAMD64UGE 44388 b.SetControl(cmp) 44389 return true 44390 } 44391 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 44392 // cond: 44393 // result: (UGE cmp yes no) 44394 for { 44395 v := b.Control 44396 if v.Op != OpAMD64TESTB { 44397 break 44398 } 44399 _ = v.Args[1] 44400 v_0 := v.Args[0] 44401 if v_0.Op != OpAMD64SETAE { 44402 break 44403 } 44404 cmp := v_0.Args[0] 44405 v_1 := v.Args[1] 44406 if v_1.Op != OpAMD64SETAE { 44407 break 44408 } 44409 if cmp != v_1.Args[0] { 44410 break 44411 } 44412 b.Kind = BlockAMD64UGE 44413 b.SetControl(cmp) 44414 return true 44415 } 44416 // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) 44417 // cond: !config.nacl 44418 // result: (ULT (BTL x y)) 44419 for { 44420 v := b.Control 44421 if v.Op != OpAMD64TESTL { 44422 break 44423 } 44424 _ = v.Args[1] 44425 v_0 := v.Args[0] 44426 if v_0.Op != OpAMD64SHLL { 44427 break 44428 } 44429 _ = v_0.Args[1] 44430 v_0_0 := v_0.Args[0] 44431 if v_0_0.Op != OpAMD64MOVLconst { 44432 break 44433 } 44434 if v_0_0.AuxInt != 1 { 44435 break 44436 } 44437 x := v_0.Args[1] 44438 y := v.Args[1] 44439 if !(!config.nacl) { 44440 break 44441 } 44442 b.Kind = BlockAMD64ULT 44443 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 44444 v0.AddArg(x) 44445 v0.AddArg(y) 44446 b.SetControl(v0) 44447 return true 44448 } 44449 // match: (NE 
(TESTL y (SHLL (MOVLconst [1]) x))) 44450 // cond: !config.nacl 44451 // result: (ULT (BTL x y)) 44452 for { 44453 v := b.Control 44454 if v.Op != OpAMD64TESTL { 44455 break 44456 } 44457 _ = v.Args[1] 44458 y := v.Args[0] 44459 v_1 := v.Args[1] 44460 if v_1.Op != OpAMD64SHLL { 44461 break 44462 } 44463 _ = v_1.Args[1] 44464 v_1_0 := v_1.Args[0] 44465 if v_1_0.Op != OpAMD64MOVLconst { 44466 break 44467 } 44468 if v_1_0.AuxInt != 1 { 44469 break 44470 } 44471 x := v_1.Args[1] 44472 if !(!config.nacl) { 44473 break 44474 } 44475 b.Kind = BlockAMD64ULT 44476 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 44477 v0.AddArg(x) 44478 v0.AddArg(y) 44479 b.SetControl(v0) 44480 return true 44481 } 44482 // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 44483 // cond: !config.nacl 44484 // result: (ULT (BTQ x y)) 44485 for { 44486 v := b.Control 44487 if v.Op != OpAMD64TESTQ { 44488 break 44489 } 44490 _ = v.Args[1] 44491 v_0 := v.Args[0] 44492 if v_0.Op != OpAMD64SHLQ { 44493 break 44494 } 44495 _ = v_0.Args[1] 44496 v_0_0 := v_0.Args[0] 44497 if v_0_0.Op != OpAMD64MOVQconst { 44498 break 44499 } 44500 if v_0_0.AuxInt != 1 { 44501 break 44502 } 44503 x := v_0.Args[1] 44504 y := v.Args[1] 44505 if !(!config.nacl) { 44506 break 44507 } 44508 b.Kind = BlockAMD64ULT 44509 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 44510 v0.AddArg(x) 44511 v0.AddArg(y) 44512 b.SetControl(v0) 44513 return true 44514 } 44515 // match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x))) 44516 // cond: !config.nacl 44517 // result: (ULT (BTQ x y)) 44518 for { 44519 v := b.Control 44520 if v.Op != OpAMD64TESTQ { 44521 break 44522 } 44523 _ = v.Args[1] 44524 y := v.Args[0] 44525 v_1 := v.Args[1] 44526 if v_1.Op != OpAMD64SHLQ { 44527 break 44528 } 44529 _ = v_1.Args[1] 44530 v_1_0 := v_1.Args[0] 44531 if v_1_0.Op != OpAMD64MOVQconst { 44532 break 44533 } 44534 if v_1_0.AuxInt != 1 { 44535 break 44536 } 44537 x := v_1.Args[1] 44538 if !(!config.nacl) { 44539 break 44540 } 44541 b.Kind = BlockAMD64ULT 44542 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 44543 v0.AddArg(x) 44544 v0.AddArg(y) 44545 b.SetControl(v0) 44546 return true 44547 } 44548 // match: (NE (TESTLconst [c] x)) 44549 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 44550 // result: (ULT (BTLconst [log2(c)] x)) 44551 for { 44552 v := b.Control 44553 if v.Op != OpAMD64TESTLconst { 44554 break 44555 } 44556 c := v.AuxInt 44557 x := v.Args[0] 44558 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 44559 break 44560 } 44561 b.Kind = BlockAMD64ULT 44562 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 44563 v0.AuxInt = log2(c) 44564 v0.AddArg(x) 44565 b.SetControl(v0) 44566 return true 44567 } 44568 // match: (NE (TESTQconst [c] x)) 44569 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 44570 // result: (ULT (BTQconst [log2(c)] x)) 44571 for { 44572 v := b.Control 44573 if v.Op != OpAMD64TESTQconst { 44574 break 44575 } 44576 c := v.AuxInt 44577 x := v.Args[0] 44578 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 44579 break 44580 } 44581 b.Kind = BlockAMD64ULT 44582 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44583 v0.AuxInt = log2(c) 44584 v0.AddArg(x) 44585 b.SetControl(v0) 44586 return true 44587 } 44588 // match: (NE (TESTQ (MOVQconst [c]) x)) 44589 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 44590 // result: (ULT (BTQconst [log2(c)] x)) 44591 for { 44592 v := b.Control 44593 if v.Op != OpAMD64TESTQ { 44594 break 44595 } 44596 _ = v.Args[1] 44597 v_0 := v.Args[0] 44598 if v_0.Op != 
OpAMD64MOVQconst { 44599 break 44600 } 44601 c := v_0.AuxInt 44602 x := v.Args[1] 44603 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 44604 break 44605 } 44606 b.Kind = BlockAMD64ULT 44607 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44608 v0.AuxInt = log2(c) 44609 v0.AddArg(x) 44610 b.SetControl(v0) 44611 return true 44612 } 44613 // match: (NE (TESTQ x (MOVQconst [c]))) 44614 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 44615 // result: (ULT (BTQconst [log2(c)] x)) 44616 for { 44617 v := b.Control 44618 if v.Op != OpAMD64TESTQ { 44619 break 44620 } 44621 _ = v.Args[1] 44622 x := v.Args[0] 44623 v_1 := v.Args[1] 44624 if v_1.Op != OpAMD64MOVQconst { 44625 break 44626 } 44627 c := v_1.AuxInt 44628 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 44629 break 44630 } 44631 b.Kind = BlockAMD64ULT 44632 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 44633 v0.AuxInt = log2(c) 44634 v0.AddArg(x) 44635 b.SetControl(v0) 44636 return true 44637 } 44638 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 44639 // cond: 44640 // result: (UGT cmp yes no) 44641 for { 44642 v := b.Control 44643 if v.Op != OpAMD64TESTB { 44644 break 44645 } 44646 _ = v.Args[1] 44647 v_0 := v.Args[0] 44648 if v_0.Op != OpAMD64SETGF { 44649 break 44650 } 44651 cmp := v_0.Args[0] 44652 v_1 := v.Args[1] 44653 if v_1.Op != OpAMD64SETGF { 44654 break 44655 } 44656 if cmp != v_1.Args[0] { 44657 break 44658 } 44659 b.Kind = BlockAMD64UGT 44660 b.SetControl(cmp) 44661 return true 44662 } 44663 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 44664 // cond: 44665 // result: (UGT cmp yes no) 44666 for { 44667 v := b.Control 44668 if v.Op != OpAMD64TESTB { 44669 break 44670 } 44671 _ = v.Args[1] 44672 v_0 := v.Args[0] 44673 if v_0.Op != OpAMD64SETGF { 44674 break 44675 } 44676 cmp := v_0.Args[0] 44677 v_1 := v.Args[1] 44678 if v_1.Op != OpAMD64SETGF { 44679 break 44680 } 44681 if cmp != v_1.Args[0] { 44682 break 44683 } 44684 b.Kind = BlockAMD64UGT 44685 b.SetControl(cmp) 44686 return true 44687 } 44688 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 44689 // cond: 44690 // result: (UGE cmp yes no) 44691 for { 44692 v := b.Control 44693 if v.Op != OpAMD64TESTB { 44694 break 44695 } 44696 _ = v.Args[1] 44697 v_0 := v.Args[0] 44698 if v_0.Op != OpAMD64SETGEF { 44699 break 44700 } 44701 cmp := v_0.Args[0] 44702 v_1 := v.Args[1] 44703 if v_1.Op != OpAMD64SETGEF { 44704 break 44705 } 44706 if cmp != v_1.Args[0] { 44707 break 44708 } 44709 b.Kind = BlockAMD64UGE 44710 b.SetControl(cmp) 44711 return true 44712 } 44713 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 44714 // cond: 44715 // result: (UGE cmp yes no) 44716 for { 44717 v := b.Control 44718 if v.Op != OpAMD64TESTB { 44719 break 44720 } 44721 _ = v.Args[1] 44722 v_0 := v.Args[0] 44723 if v_0.Op != OpAMD64SETGEF { 44724 break 44725 } 44726 cmp := v_0.Args[0] 44727 v_1 := v.Args[1] 44728 if v_1.Op != OpAMD64SETGEF { 44729 break 44730 } 44731 if cmp != v_1.Args[0] { 44732 break 44733 } 44734 b.Kind = BlockAMD64UGE 44735 b.SetControl(cmp) 44736 return true 44737 } 44738 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 44739 // cond: 44740 // result: (EQF cmp yes no) 44741 for { 44742 v := b.Control 44743 if v.Op != OpAMD64TESTB { 44744 break 44745 } 44746 _ = v.Args[1] 44747 v_0 := v.Args[0] 44748 if v_0.Op != OpAMD64SETEQF { 44749 break 44750 } 44751 cmp := v_0.Args[0] 44752 v_1 := v.Args[1] 44753 if v_1.Op != OpAMD64SETEQF { 44754 break 44755 } 44756 if cmp != v_1.Args[0] { 44757 break 44758 } 
44759 b.Kind = BlockAMD64EQF 44760 b.SetControl(cmp) 44761 return true 44762 } 44763 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 44764 // cond: 44765 // result: (EQF cmp yes no) 44766 for { 44767 v := b.Control 44768 if v.Op != OpAMD64TESTB { 44769 break 44770 } 44771 _ = v.Args[1] 44772 v_0 := v.Args[0] 44773 if v_0.Op != OpAMD64SETEQF { 44774 break 44775 } 44776 cmp := v_0.Args[0] 44777 v_1 := v.Args[1] 44778 if v_1.Op != OpAMD64SETEQF { 44779 break 44780 } 44781 if cmp != v_1.Args[0] { 44782 break 44783 } 44784 b.Kind = BlockAMD64EQF 44785 b.SetControl(cmp) 44786 return true 44787 } 44788 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 44789 // cond: 44790 // result: (NEF cmp yes no) 44791 for { 44792 v := b.Control 44793 if v.Op != OpAMD64TESTB { 44794 break 44795 } 44796 _ = v.Args[1] 44797 v_0 := v.Args[0] 44798 if v_0.Op != OpAMD64SETNEF { 44799 break 44800 } 44801 cmp := v_0.Args[0] 44802 v_1 := v.Args[1] 44803 if v_1.Op != OpAMD64SETNEF { 44804 break 44805 } 44806 if cmp != v_1.Args[0] { 44807 break 44808 } 44809 b.Kind = BlockAMD64NEF 44810 b.SetControl(cmp) 44811 return true 44812 } 44813 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 44814 // cond: 44815 // result: (NEF cmp yes no) 44816 for { 44817 v := b.Control 44818 if v.Op != OpAMD64TESTB { 44819 break 44820 } 44821 _ = v.Args[1] 44822 v_0 := v.Args[0] 44823 if v_0.Op != OpAMD64SETNEF { 44824 break 44825 } 44826 cmp := v_0.Args[0] 44827 v_1 := v.Args[1] 44828 if v_1.Op != OpAMD64SETNEF { 44829 break 44830 } 44831 if cmp != v_1.Args[0] { 44832 break 44833 } 44834 b.Kind = BlockAMD64NEF 44835 b.SetControl(cmp) 44836 return true 44837 } 44838 // match: (NE (InvertFlags cmp) yes no) 44839 // cond: 44840 // result: (NE cmp yes no) 44841 for { 44842 v := b.Control 44843 if v.Op != OpAMD64InvertFlags { 44844 break 44845 } 44846 cmp := v.Args[0] 44847 b.Kind = BlockAMD64NE 44848 b.SetControl(cmp) 44849 return true 44850 } 44851 // match: (NE (FlagEQ) yes no) 44852 // cond: 44853 // result: (First nil no yes) 44854 for { 44855 v := b.Control 44856 if v.Op != OpAMD64FlagEQ { 44857 break 44858 } 44859 b.Kind = BlockFirst 44860 b.SetControl(nil) 44861 b.swapSuccessors() 44862 return true 44863 } 44864 // match: (NE (FlagLT_ULT) yes no) 44865 // cond: 44866 // result: (First nil yes no) 44867 for { 44868 v := b.Control 44869 if v.Op != OpAMD64FlagLT_ULT { 44870 break 44871 } 44872 b.Kind = BlockFirst 44873 b.SetControl(nil) 44874 return true 44875 } 44876 // match: (NE (FlagLT_UGT) yes no) 44877 // cond: 44878 // result: (First nil yes no) 44879 for { 44880 v := b.Control 44881 if v.Op != OpAMD64FlagLT_UGT { 44882 break 44883 } 44884 b.Kind = BlockFirst 44885 b.SetControl(nil) 44886 return true 44887 } 44888 // match: (NE (FlagGT_ULT) yes no) 44889 // cond: 44890 // result: (First nil yes no) 44891 for { 44892 v := b.Control 44893 if v.Op != OpAMD64FlagGT_ULT { 44894 break 44895 } 44896 b.Kind = BlockFirst 44897 b.SetControl(nil) 44898 return true 44899 } 44900 // match: (NE (FlagGT_UGT) yes no) 44901 // cond: 44902 // result: (First nil yes no) 44903 for { 44904 v := b.Control 44905 if v.Op != OpAMD64FlagGT_UGT { 44906 break 44907 } 44908 b.Kind = BlockFirst 44909 b.SetControl(nil) 44910 return true 44911 } 44912 case BlockAMD64UGE: 44913 // match: (UGE (InvertFlags cmp) yes no) 44914 // cond: 44915 // result: (ULE cmp yes no) 44916 for { 44917 v := b.Control 44918 if v.Op != OpAMD64InvertFlags { 44919 break 44920 } 44921 cmp := v.Args[0] 44922 b.Kind = BlockAMD64ULE 44923 b.SetControl(cmp) 44924 return 
true 44925 } 44926 // match: (UGE (FlagEQ) yes no) 44927 // cond: 44928 // result: (First nil yes no) 44929 for { 44930 v := b.Control 44931 if v.Op != OpAMD64FlagEQ { 44932 break 44933 } 44934 b.Kind = BlockFirst 44935 b.SetControl(nil) 44936 return true 44937 } 44938 // match: (UGE (FlagLT_ULT) yes no) 44939 // cond: 44940 // result: (First nil no yes) 44941 for { 44942 v := b.Control 44943 if v.Op != OpAMD64FlagLT_ULT { 44944 break 44945 } 44946 b.Kind = BlockFirst 44947 b.SetControl(nil) 44948 b.swapSuccessors() 44949 return true 44950 } 44951 // match: (UGE (FlagLT_UGT) yes no) 44952 // cond: 44953 // result: (First nil yes no) 44954 for { 44955 v := b.Control 44956 if v.Op != OpAMD64FlagLT_UGT { 44957 break 44958 } 44959 b.Kind = BlockFirst 44960 b.SetControl(nil) 44961 return true 44962 } 44963 // match: (UGE (FlagGT_ULT) yes no) 44964 // cond: 44965 // result: (First nil no yes) 44966 for { 44967 v := b.Control 44968 if v.Op != OpAMD64FlagGT_ULT { 44969 break 44970 } 44971 b.Kind = BlockFirst 44972 b.SetControl(nil) 44973 b.swapSuccessors() 44974 return true 44975 } 44976 // match: (UGE (FlagGT_UGT) yes no) 44977 // cond: 44978 // result: (First nil yes no) 44979 for { 44980 v := b.Control 44981 if v.Op != OpAMD64FlagGT_UGT { 44982 break 44983 } 44984 b.Kind = BlockFirst 44985 b.SetControl(nil) 44986 return true 44987 } 44988 case BlockAMD64UGT: 44989 // match: (UGT (InvertFlags cmp) yes no) 44990 // cond: 44991 // result: (ULT cmp yes no) 44992 for { 44993 v := b.Control 44994 if v.Op != OpAMD64InvertFlags { 44995 break 44996 } 44997 cmp := v.Args[0] 44998 b.Kind = BlockAMD64ULT 44999 b.SetControl(cmp) 45000 return true 45001 } 45002 // match: (UGT (FlagEQ) yes no) 45003 // cond: 45004 // result: (First nil no yes) 45005 for { 45006 v := b.Control 45007 if v.Op != OpAMD64FlagEQ { 45008 break 45009 } 45010 b.Kind = BlockFirst 45011 b.SetControl(nil) 45012 b.swapSuccessors() 45013 return true 45014 } 45015 // match: (UGT (FlagLT_ULT) yes no) 45016 // cond: 45017 // result: (First nil no yes) 45018 for { 45019 v := b.Control 45020 if v.Op != OpAMD64FlagLT_ULT { 45021 break 45022 } 45023 b.Kind = BlockFirst 45024 b.SetControl(nil) 45025 b.swapSuccessors() 45026 return true 45027 } 45028 // match: (UGT (FlagLT_UGT) yes no) 45029 // cond: 45030 // result: (First nil yes no) 45031 for { 45032 v := b.Control 45033 if v.Op != OpAMD64FlagLT_UGT { 45034 break 45035 } 45036 b.Kind = BlockFirst 45037 b.SetControl(nil) 45038 return true 45039 } 45040 // match: (UGT (FlagGT_ULT) yes no) 45041 // cond: 45042 // result: (First nil no yes) 45043 for { 45044 v := b.Control 45045 if v.Op != OpAMD64FlagGT_ULT { 45046 break 45047 } 45048 b.Kind = BlockFirst 45049 b.SetControl(nil) 45050 b.swapSuccessors() 45051 return true 45052 } 45053 // match: (UGT (FlagGT_UGT) yes no) 45054 // cond: 45055 // result: (First nil yes no) 45056 for { 45057 v := b.Control 45058 if v.Op != OpAMD64FlagGT_UGT { 45059 break 45060 } 45061 b.Kind = BlockFirst 45062 b.SetControl(nil) 45063 return true 45064 } 45065 case BlockAMD64ULE: 45066 // match: (ULE (InvertFlags cmp) yes no) 45067 // cond: 45068 // result: (UGE cmp yes no) 45069 for { 45070 v := b.Control 45071 if v.Op != OpAMD64InvertFlags { 45072 break 45073 } 45074 cmp := v.Args[0] 45075 b.Kind = BlockAMD64UGE 45076 b.SetControl(cmp) 45077 return true 45078 } 45079 // match: (ULE (FlagEQ) yes no) 45080 // cond: 45081 // result: (First nil yes no) 45082 for { 45083 v := b.Control 45084 if v.Op != OpAMD64FlagEQ { 45085 break 45086 } 45087 b.Kind = BlockFirst 45088 
b.SetControl(nil) 45089 return true 45090 } 45091 // match: (ULE (FlagLT_ULT) yes no) 45092 // cond: 45093 // result: (First nil yes no) 45094 for { 45095 v := b.Control 45096 if v.Op != OpAMD64FlagLT_ULT { 45097 break 45098 } 45099 b.Kind = BlockFirst 45100 b.SetControl(nil) 45101 return true 45102 } 45103 // match: (ULE (FlagLT_UGT) yes no) 45104 // cond: 45105 // result: (First nil no yes) 45106 for { 45107 v := b.Control 45108 if v.Op != OpAMD64FlagLT_UGT { 45109 break 45110 } 45111 b.Kind = BlockFirst 45112 b.SetControl(nil) 45113 b.swapSuccessors() 45114 return true 45115 } 45116 // match: (ULE (FlagGT_ULT) yes no) 45117 // cond: 45118 // result: (First nil yes no) 45119 for { 45120 v := b.Control 45121 if v.Op != OpAMD64FlagGT_ULT { 45122 break 45123 } 45124 b.Kind = BlockFirst 45125 b.SetControl(nil) 45126 return true 45127 } 45128 // match: (ULE (FlagGT_UGT) yes no) 45129 // cond: 45130 // result: (First nil no yes) 45131 for { 45132 v := b.Control 45133 if v.Op != OpAMD64FlagGT_UGT { 45134 break 45135 } 45136 b.Kind = BlockFirst 45137 b.SetControl(nil) 45138 b.swapSuccessors() 45139 return true 45140 } 45141 case BlockAMD64ULT: 45142 // match: (ULT (InvertFlags cmp) yes no) 45143 // cond: 45144 // result: (UGT cmp yes no) 45145 for { 45146 v := b.Control 45147 if v.Op != OpAMD64InvertFlags { 45148 break 45149 } 45150 cmp := v.Args[0] 45151 b.Kind = BlockAMD64UGT 45152 b.SetControl(cmp) 45153 return true 45154 } 45155 // match: (ULT (FlagEQ) yes no) 45156 // cond: 45157 // result: (First nil no yes) 45158 for { 45159 v := b.Control 45160 if v.Op != OpAMD64FlagEQ { 45161 break 45162 } 45163 b.Kind = BlockFirst 45164 b.SetControl(nil) 45165 b.swapSuccessors() 45166 return true 45167 } 45168 // match: (ULT (FlagLT_ULT) yes no) 45169 // cond: 45170 // result: (First nil yes no) 45171 for { 45172 v := b.Control 45173 if v.Op != OpAMD64FlagLT_ULT { 45174 break 45175 } 45176 b.Kind = BlockFirst 45177 b.SetControl(nil) 45178 return true 45179 } 45180 // match: (ULT (FlagLT_UGT) yes no) 45181 // cond: 45182 // result: (First nil no yes) 45183 for { 45184 v := b.Control 45185 if v.Op != OpAMD64FlagLT_UGT { 45186 break 45187 } 45188 b.Kind = BlockFirst 45189 b.SetControl(nil) 45190 b.swapSuccessors() 45191 return true 45192 } 45193 // match: (ULT (FlagGT_ULT) yes no) 45194 // cond: 45195 // result: (First nil yes no) 45196 for { 45197 v := b.Control 45198 if v.Op != OpAMD64FlagGT_ULT { 45199 break 45200 } 45201 b.Kind = BlockFirst 45202 b.SetControl(nil) 45203 return true 45204 } 45205 // match: (ULT (FlagGT_UGT) yes no) 45206 // cond: 45207 // result: (First nil no yes) 45208 for { 45209 v := b.Control 45210 if v.Op != OpAMD64FlagGT_UGT { 45211 break 45212 } 45213 b.Kind = BlockFirst 45214 b.SetControl(nil) 45215 b.swapSuccessors() 45216 return true 45217 } 45218 } 45219 return false 45220 }