// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDLconstmem:
		return rewriteValueAMD64_OpAMD64ADDLconstmem_0(v)
	case OpAMD64ADDLmem:
		return rewriteValueAMD64_OpAMD64ADDLmem_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDQconstmem:
		return rewriteValueAMD64_OpAMD64ADDQconstmem_0(v)
	case OpAMD64ADDQmem:
		return rewriteValueAMD64_OpAMD64ADDQmem_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDmem:
		return rewriteValueAMD64_OpAMD64ADDSDmem_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSmem:
		return rewriteValueAMD64_OpAMD64ADDSSmem_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLmem:
		return rewriteValueAMD64_OpAMD64ANDLmem_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQmem:
		return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDmem:
		return rewriteValueAMD64_OpAMD64MULSDmem_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSmem:
		return rewriteValueAMD64_OpAMD64MULSSmem_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLmem:
		return rewriteValueAMD64_OpAMD64ORLmem_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQmem:
		return rewriteValueAMD64_OpAMD64ORQmem_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETAEmem:
		return rewriteValueAMD64_OpAMD64SETAEmem_0(v)
	case OpAMD64SETAmem:
		return rewriteValueAMD64_OpAMD64SETAmem_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETBEmem:
		return rewriteValueAMD64_OpAMD64SETBEmem_0(v)
	case OpAMD64SETBmem:
		return rewriteValueAMD64_OpAMD64SETBmem_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETEQmem:
		return rewriteValueAMD64_OpAMD64SETEQmem_0(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETGEmem:
		return rewriteValueAMD64_OpAMD64SETGEmem_0(v)
	case OpAMD64SETGmem:
		return rewriteValueAMD64_OpAMD64SETGmem_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETLEmem:
		return rewriteValueAMD64_OpAMD64SETLEmem_0(v)
	case OpAMD64SETLmem:
		return rewriteValueAMD64_OpAMD64SETLmem_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SETNEmem:
		return rewriteValueAMD64_OpAMD64SETNEmem_0(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLmem:
		return rewriteValueAMD64_OpAMD64SUBLmem_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQmem:
		return rewriteValueAMD64_OpAMD64SUBQmem_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDmem:
		return rewriteValueAMD64_OpAMD64SUBSDmem_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSmem:
		return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLmem:
		return rewriteValueAMD64_OpAMD64XORLmem_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQmem:
		return rewriteValueAMD64_OpAMD64XORQmem_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetCallerPC:
		return rewriteValueAMD64_OpGetCallerPC_0(v)
	case OpGetCallerSP:
		return rewriteValueAMD64_OpGetCallerSP_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
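// Each rewriteValueAMD64_Op*_N helper below tries its rules in order and
// reports whether it rewrote v in place; rules for a single op are emitted
// in chunks of ten (_0, _10, _20, ...) and chained with ||, so evaluation
// stops at the first rule that fires. The actual driver lives elsewhere in
// this package (rewrite.go); a minimal sketch of how such a dispatcher is
// driven to a fixed point, using a hypothetical helper name, would be:
//
//	func rewriteToFixpoint(f *Func) {
//		for changed := true; changed; {
//			changed = false
//			for _, b := range f.Blocks {
//				for _, v := range b.Values {
//					changed = rewriteValueAMD64(v) || changed
//				}
//			}
//		}
//	}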
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
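// Every match/cond/result triple above is the compiled form of one rule in
// gen/AMD64.rules (see the header comment). Paraphrasing the comments above
// into the rule DSL's shape, the first and third ADDL rules read roughly:
//
//	(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
//	(ADDL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
//
// The second of these recognizes (x<<c) + (x>>(32-c)): the two shifted
// values have no set bits in common, so the add acts as a bitwise OR and
// the whole expression is a rotate left of x by c bits.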
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
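// The AuxInt of *constmem ops like the one above packs two quantities into
// one int64: ValAndOff(valOff).Val() is the immediate operand and
// ValAndOff(valOff).Off() is the addressing offset (value in the high 32
// bits, offset in the low 32 bits; see type ValAndOff in this package).
// Schematically, with a hypothetical packed AuxInt:
//
//	valOff := int64(42)<<32 | int64(16) // val 42, off 16
//	val := valOff >> 32                 // 42: the constant operand
//	off := int64(int32(valOff))         // 16: the memory offset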
func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
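// The SHLQconst rules above strength-reduce an add of a shifted index into
// a single scaled LEA: shift amounts 1, 2, and 3 become the x2, x4, and x8
// scale factors of LEAQ2, LEAQ4, and LEAQ8. Source code of this shape (an
// illustrative example, not taken from the compiler) is what typically
// produces the pattern:
//
//	func addr8(base, i uintptr) uintptr {
//		return base + i*8 // i*8 is (SHLQconst [3] i); the add becomes (LEAQ8 base i)
//	}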
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
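// The last two rules above fold a load feeding an add into one read-modify
// op: canMergeLoad reports whether the load l can safely be absorbed into v
// (roughly, l has no other uses and no conflicting memory operation comes
// between them), and clobber marks l dead so later dead-code elimination
// removes it. At the instruction level the effect is, schematically:
//
//	MOVQ 8(SI), AX // l: (MOVQload [8] {sym} ptr mem)
//	ADDQ AX, BX    // v: (ADDQ x l)
//
// collapsing into
//
//	ADDQ 8(SI), BX // (ADDQmem x [8] {sym} ptr mem)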
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
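// Note the asymmetry between the last two rules above: folding into
// (MOVQconst [c+d]) needs no guard because the result is a free-standing
// 64-bit constant, while folding into (ADDQconst [c+d] x) requires
// is32Bit(c+d) because an ADDQconst AuxInt must fit the signed 32-bit
// immediate field of the amd64 instruction. is32Bit (defined in rewrite.go)
// is essentially:
//
//	func is32Bit(n int64) bool { return n == int64(int32(n)) }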
func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
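// The MOVQf2i values materialized by the ADDQmem/ADDQconstmem rules above
// (and the i2f duals used by the ADDSDmem/ADDSSmem rules below) are
// register-to-register bit reinterpretations: when an op would reload
// memory that a floating-point store just wrote, the rewrite forwards the
// still-live register and reinterprets its bits instead of going through
// memory. The reinterpretation has the same semantics as (illustrative
// only):
//
//	i := math.Float64bits(f)      // MOVQf2i: float64 bits viewed as an integer
//	f2 := math.Float64frombits(i) // MOVQi2f: integer bits viewed as a float64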
2184 // match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 2185 // cond: 2186 // result: (ADDSD x (MOVQi2f y)) 2187 for { 2188 off := v.AuxInt 2189 sym := v.Aux 2190 _ = v.Args[2] 2191 x := v.Args[0] 2192 ptr := v.Args[1] 2193 v_2 := v.Args[2] 2194 if v_2.Op != OpAMD64MOVQstore { 2195 break 2196 } 2197 if v_2.AuxInt != off { 2198 break 2199 } 2200 if v_2.Aux != sym { 2201 break 2202 } 2203 _ = v_2.Args[2] 2204 if ptr != v_2.Args[0] { 2205 break 2206 } 2207 y := v_2.Args[1] 2208 v.reset(OpAMD64ADDSD) 2209 v.AddArg(x) 2210 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 2211 v0.AddArg(y) 2212 v.AddArg(v0) 2213 return true 2214 } 2215 return false 2216 } 2217 func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { 2218 // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) 2219 // cond: canMergeLoad(v, l, x) && clobber(l) 2220 // result: (ADDSSmem x [off] {sym} ptr mem) 2221 for { 2222 _ = v.Args[1] 2223 x := v.Args[0] 2224 l := v.Args[1] 2225 if l.Op != OpAMD64MOVSSload { 2226 break 2227 } 2228 off := l.AuxInt 2229 sym := l.Aux 2230 _ = l.Args[1] 2231 ptr := l.Args[0] 2232 mem := l.Args[1] 2233 if !(canMergeLoad(v, l, x) && clobber(l)) { 2234 break 2235 } 2236 v.reset(OpAMD64ADDSSmem) 2237 v.AuxInt = off 2238 v.Aux = sym 2239 v.AddArg(x) 2240 v.AddArg(ptr) 2241 v.AddArg(mem) 2242 return true 2243 } 2244 // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) 2245 // cond: canMergeLoad(v, l, x) && clobber(l) 2246 // result: (ADDSSmem x [off] {sym} ptr mem) 2247 for { 2248 _ = v.Args[1] 2249 l := v.Args[0] 2250 if l.Op != OpAMD64MOVSSload { 2251 break 2252 } 2253 off := l.AuxInt 2254 sym := l.Aux 2255 _ = l.Args[1] 2256 ptr := l.Args[0] 2257 mem := l.Args[1] 2258 x := v.Args[1] 2259 if !(canMergeLoad(v, l, x) && clobber(l)) { 2260 break 2261 } 2262 v.reset(OpAMD64ADDSSmem) 2263 v.AuxInt = off 2264 v.Aux = sym 2265 v.AddArg(x) 2266 v.AddArg(ptr) 2267 v.AddArg(mem) 2268 return true 2269 } 2270 return false 2271 } 2272 func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool { 2273 b := v.Block 2274 _ = b 2275 typ := &b.Func.Config.Types 2276 _ = typ 2277 // match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 2278 // cond: 2279 // result: (ADDSS x (MOVLi2f y)) 2280 for { 2281 off := v.AuxInt 2282 sym := v.Aux 2283 _ = v.Args[2] 2284 x := v.Args[0] 2285 ptr := v.Args[1] 2286 v_2 := v.Args[2] 2287 if v_2.Op != OpAMD64MOVLstore { 2288 break 2289 } 2290 if v_2.AuxInt != off { 2291 break 2292 } 2293 if v_2.Aux != sym { 2294 break 2295 } 2296 _ = v_2.Args[2] 2297 if ptr != v_2.Args[0] { 2298 break 2299 } 2300 y := v_2.Args[1] 2301 v.reset(OpAMD64ADDSS) 2302 v.AddArg(x) 2303 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 2304 v0.AddArg(y) 2305 v.AddArg(v0) 2306 return true 2307 } 2308 return false 2309 } 2310 func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { 2311 // match: (ANDL x (MOVLconst [c])) 2312 // cond: 2313 // result: (ANDLconst [c] x) 2314 for { 2315 _ = v.Args[1] 2316 x := v.Args[0] 2317 v_1 := v.Args[1] 2318 if v_1.Op != OpAMD64MOVLconst { 2319 break 2320 } 2321 c := v_1.AuxInt 2322 v.reset(OpAMD64ANDLconst) 2323 v.AuxInt = c 2324 v.AddArg(x) 2325 return true 2326 } 2327 // match: (ANDL (MOVLconst [c]) x) 2328 // cond: 2329 // result: (ANDLconst [c] x) 2330 for { 2331 _ = v.Args[1] 2332 v_0 := v.Args[0] 2333 if v_0.Op != OpAMD64MOVLconst { 2334 break 2335 } 2336 c := v_0.AuxInt 2337 x := v.Args[1] 2338 v.reset(OpAMD64ANDLconst) 2339 v.AuxInt = c 2340 v.AddArg(x) 2341 return true 2342 } 2343 // match: (ANDL x x) 2344 // cond: 2345 // result: x 2346 
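// NOTE (editorial, not generator output): ANDL is idempotent (b & b == b for
// any bit pattern), so the match below emits no new instruction; v is reset to
// an OpCopy of x with x's type preserved.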
for { 2347 _ = v.Args[1] 2348 x := v.Args[0] 2349 if x != v.Args[1] { 2350 break 2351 } 2352 v.reset(OpCopy) 2353 v.Type = x.Type 2354 v.AddArg(x) 2355 return true 2356 } 2357 // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) 2358 // cond: canMergeLoad(v, l, x) && clobber(l) 2359 // result: (ANDLmem x [off] {sym} ptr mem) 2360 for { 2361 _ = v.Args[1] 2362 x := v.Args[0] 2363 l := v.Args[1] 2364 if l.Op != OpAMD64MOVLload { 2365 break 2366 } 2367 off := l.AuxInt 2368 sym := l.Aux 2369 _ = l.Args[1] 2370 ptr := l.Args[0] 2371 mem := l.Args[1] 2372 if !(canMergeLoad(v, l, x) && clobber(l)) { 2373 break 2374 } 2375 v.reset(OpAMD64ANDLmem) 2376 v.AuxInt = off 2377 v.Aux = sym 2378 v.AddArg(x) 2379 v.AddArg(ptr) 2380 v.AddArg(mem) 2381 return true 2382 } 2383 // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) 2384 // cond: canMergeLoad(v, l, x) && clobber(l) 2385 // result: (ANDLmem x [off] {sym} ptr mem) 2386 for { 2387 _ = v.Args[1] 2388 l := v.Args[0] 2389 if l.Op != OpAMD64MOVLload { 2390 break 2391 } 2392 off := l.AuxInt 2393 sym := l.Aux 2394 _ = l.Args[1] 2395 ptr := l.Args[0] 2396 mem := l.Args[1] 2397 x := v.Args[1] 2398 if !(canMergeLoad(v, l, x) && clobber(l)) { 2399 break 2400 } 2401 v.reset(OpAMD64ANDLmem) 2402 v.AuxInt = off 2403 v.Aux = sym 2404 v.AddArg(x) 2405 v.AddArg(ptr) 2406 v.AddArg(mem) 2407 return true 2408 } 2409 return false 2410 } 2411 func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool { 2412 // match: (ANDLconst [c] (ANDLconst [d] x)) 2413 // cond: 2414 // result: (ANDLconst [c & d] x) 2415 for { 2416 c := v.AuxInt 2417 v_0 := v.Args[0] 2418 if v_0.Op != OpAMD64ANDLconst { 2419 break 2420 } 2421 d := v_0.AuxInt 2422 x := v_0.Args[0] 2423 v.reset(OpAMD64ANDLconst) 2424 v.AuxInt = c & d 2425 v.AddArg(x) 2426 return true 2427 } 2428 // match: (ANDLconst [0xFF] x) 2429 // cond: 2430 // result: (MOVBQZX x) 2431 for { 2432 if v.AuxInt != 0xFF { 2433 break 2434 } 2435 x := v.Args[0] 2436 v.reset(OpAMD64MOVBQZX) 2437 v.AddArg(x) 2438 return true 2439 } 2440 // match: (ANDLconst [0xFFFF] x) 2441 // cond: 2442 // result: (MOVWQZX x) 2443 for { 2444 if v.AuxInt != 0xFFFF { 2445 break 2446 } 2447 x := v.Args[0] 2448 v.reset(OpAMD64MOVWQZX) 2449 v.AddArg(x) 2450 return true 2451 } 2452 // match: (ANDLconst [c] _) 2453 // cond: int32(c)==0 2454 // result: (MOVLconst [0]) 2455 for { 2456 c := v.AuxInt 2457 if !(int32(c) == 0) { 2458 break 2459 } 2460 v.reset(OpAMD64MOVLconst) 2461 v.AuxInt = 0 2462 return true 2463 } 2464 // match: (ANDLconst [c] x) 2465 // cond: int32(c)==-1 2466 // result: x 2467 for { 2468 c := v.AuxInt 2469 x := v.Args[0] 2470 if !(int32(c) == -1) { 2471 break 2472 } 2473 v.reset(OpCopy) 2474 v.Type = x.Type 2475 v.AddArg(x) 2476 return true 2477 } 2478 // match: (ANDLconst [c] (MOVLconst [d])) 2479 // cond: 2480 // result: (MOVLconst [c&d]) 2481 for { 2482 c := v.AuxInt 2483 v_0 := v.Args[0] 2484 if v_0.Op != OpAMD64MOVLconst { 2485 break 2486 } 2487 d := v_0.AuxInt 2488 v.reset(OpAMD64MOVLconst) 2489 v.AuxInt = c & d 2490 return true 2491 } 2492 return false 2493 } 2494 func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool { 2495 b := v.Block 2496 _ = b 2497 typ := &b.Func.Config.Types 2498 _ = typ 2499 // match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 2500 // cond: 2501 // result: (ANDL x (MOVLf2i y)) 2502 for { 2503 off := v.AuxInt 2504 sym := v.Aux 2505 _ = v.Args[2] 2506 x := v.Args[0] 2507 ptr := v.Args[1] 2508 v_2 := v.Args[2] 2509 if v_2.Op != OpAMD64MOVSSstore { 2510 break 2511 } 2512 if v_2.AuxInt != off { 2513 break 
2514 } 2515 if v_2.Aux != sym { 2516 break 2517 } 2518 _ = v_2.Args[2] 2519 if ptr != v_2.Args[0] { 2520 break 2521 } 2522 y := v_2.Args[1] 2523 v.reset(OpAMD64ANDL) 2524 v.AddArg(x) 2525 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 2526 v0.AddArg(y) 2527 v.AddArg(v0) 2528 return true 2529 } 2530 return false 2531 } 2532 func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { 2533 // match: (ANDQ x (MOVQconst [c])) 2534 // cond: is32Bit(c) 2535 // result: (ANDQconst [c] x) 2536 for { 2537 _ = v.Args[1] 2538 x := v.Args[0] 2539 v_1 := v.Args[1] 2540 if v_1.Op != OpAMD64MOVQconst { 2541 break 2542 } 2543 c := v_1.AuxInt 2544 if !(is32Bit(c)) { 2545 break 2546 } 2547 v.reset(OpAMD64ANDQconst) 2548 v.AuxInt = c 2549 v.AddArg(x) 2550 return true 2551 } 2552 // match: (ANDQ (MOVQconst [c]) x) 2553 // cond: is32Bit(c) 2554 // result: (ANDQconst [c] x) 2555 for { 2556 _ = v.Args[1] 2557 v_0 := v.Args[0] 2558 if v_0.Op != OpAMD64MOVQconst { 2559 break 2560 } 2561 c := v_0.AuxInt 2562 x := v.Args[1] 2563 if !(is32Bit(c)) { 2564 break 2565 } 2566 v.reset(OpAMD64ANDQconst) 2567 v.AuxInt = c 2568 v.AddArg(x) 2569 return true 2570 } 2571 // match: (ANDQ x x) 2572 // cond: 2573 // result: x 2574 for { 2575 _ = v.Args[1] 2576 x := v.Args[0] 2577 if x != v.Args[1] { 2578 break 2579 } 2580 v.reset(OpCopy) 2581 v.Type = x.Type 2582 v.AddArg(x) 2583 return true 2584 } 2585 // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem)) 2586 // cond: canMergeLoad(v, l, x) && clobber(l) 2587 // result: (ANDQmem x [off] {sym} ptr mem) 2588 for { 2589 _ = v.Args[1] 2590 x := v.Args[0] 2591 l := v.Args[1] 2592 if l.Op != OpAMD64MOVQload { 2593 break 2594 } 2595 off := l.AuxInt 2596 sym := l.Aux 2597 _ = l.Args[1] 2598 ptr := l.Args[0] 2599 mem := l.Args[1] 2600 if !(canMergeLoad(v, l, x) && clobber(l)) { 2601 break 2602 } 2603 v.reset(OpAMD64ANDQmem) 2604 v.AuxInt = off 2605 v.Aux = sym 2606 v.AddArg(x) 2607 v.AddArg(ptr) 2608 v.AddArg(mem) 2609 return true 2610 } 2611 // match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x) 2612 // cond: canMergeLoad(v, l, x) && clobber(l) 2613 // result: (ANDQmem x [off] {sym} ptr mem) 2614 for { 2615 _ = v.Args[1] 2616 l := v.Args[0] 2617 if l.Op != OpAMD64MOVQload { 2618 break 2619 } 2620 off := l.AuxInt 2621 sym := l.Aux 2622 _ = l.Args[1] 2623 ptr := l.Args[0] 2624 mem := l.Args[1] 2625 x := v.Args[1] 2626 if !(canMergeLoad(v, l, x) && clobber(l)) { 2627 break 2628 } 2629 v.reset(OpAMD64ANDQmem) 2630 v.AuxInt = off 2631 v.Aux = sym 2632 v.AddArg(x) 2633 v.AddArg(ptr) 2634 v.AddArg(mem) 2635 return true 2636 } 2637 return false 2638 } 2639 func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool { 2640 // match: (ANDQconst [c] (ANDQconst [d] x)) 2641 // cond: 2642 // result: (ANDQconst [c & d] x) 2643 for { 2644 c := v.AuxInt 2645 v_0 := v.Args[0] 2646 if v_0.Op != OpAMD64ANDQconst { 2647 break 2648 } 2649 d := v_0.AuxInt 2650 x := v_0.Args[0] 2651 v.reset(OpAMD64ANDQconst) 2652 v.AuxInt = c & d 2653 v.AddArg(x) 2654 return true 2655 } 2656 // match: (ANDQconst [0xFF] x) 2657 // cond: 2658 // result: (MOVBQZX x) 2659 for { 2660 if v.AuxInt != 0xFF { 2661 break 2662 } 2663 x := v.Args[0] 2664 v.reset(OpAMD64MOVBQZX) 2665 v.AddArg(x) 2666 return true 2667 } 2668 // match: (ANDQconst [0xFFFF] x) 2669 // cond: 2670 // result: (MOVWQZX x) 2671 for { 2672 if v.AuxInt != 0xFFFF { 2673 break 2674 } 2675 x := v.Args[0] 2676 v.reset(OpAMD64MOVWQZX) 2677 v.AddArg(x) 2678 return true 2679 } 2680 // match: (ANDQconst [0xFFFFFFFF] x) 2681 // cond: 2682 // result: (MOVLQZX x) 2683 for { 2684 if 
v.AuxInt != 0xFFFFFFFF { 2685 break 2686 } 2687 x := v.Args[0] 2688 v.reset(OpAMD64MOVLQZX) 2689 v.AddArg(x) 2690 return true 2691 } 2692 // match: (ANDQconst [0] _) 2693 // cond: 2694 // result: (MOVQconst [0]) 2695 for { 2696 if v.AuxInt != 0 { 2697 break 2698 } 2699 v.reset(OpAMD64MOVQconst) 2700 v.AuxInt = 0 2701 return true 2702 } 2703 // match: (ANDQconst [-1] x) 2704 // cond: 2705 // result: x 2706 for { 2707 if v.AuxInt != -1 { 2708 break 2709 } 2710 x := v.Args[0] 2711 v.reset(OpCopy) 2712 v.Type = x.Type 2713 v.AddArg(x) 2714 return true 2715 } 2716 // match: (ANDQconst [c] (MOVQconst [d])) 2717 // cond: 2718 // result: (MOVQconst [c&d]) 2719 for { 2720 c := v.AuxInt 2721 v_0 := v.Args[0] 2722 if v_0.Op != OpAMD64MOVQconst { 2723 break 2724 } 2725 d := v_0.AuxInt 2726 v.reset(OpAMD64MOVQconst) 2727 v.AuxInt = c & d 2728 return true 2729 } 2730 return false 2731 } 2732 func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool { 2733 b := v.Block 2734 _ = b 2735 typ := &b.Func.Config.Types 2736 _ = typ 2737 // match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 2738 // cond: 2739 // result: (ANDQ x (MOVQf2i y)) 2740 for { 2741 off := v.AuxInt 2742 sym := v.Aux 2743 _ = v.Args[2] 2744 x := v.Args[0] 2745 ptr := v.Args[1] 2746 v_2 := v.Args[2] 2747 if v_2.Op != OpAMD64MOVSDstore { 2748 break 2749 } 2750 if v_2.AuxInt != off { 2751 break 2752 } 2753 if v_2.Aux != sym { 2754 break 2755 } 2756 _ = v_2.Args[2] 2757 if ptr != v_2.Args[0] { 2758 break 2759 } 2760 y := v_2.Args[1] 2761 v.reset(OpAMD64ANDQ) 2762 v.AddArg(x) 2763 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 2764 v0.AddArg(y) 2765 v.AddArg(v0) 2766 return true 2767 } 2768 return false 2769 } 2770 func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool { 2771 b := v.Block 2772 _ = b 2773 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) 2774 // cond: 2775 // result: (BSFQ (ORQconst <t> [1<<8] x)) 2776 for { 2777 v_0 := v.Args[0] 2778 if v_0.Op != OpAMD64ORQconst { 2779 break 2780 } 2781 t := v_0.Type 2782 if v_0.AuxInt != 1<<8 { 2783 break 2784 } 2785 v_0_0 := v_0.Args[0] 2786 if v_0_0.Op != OpAMD64MOVBQZX { 2787 break 2788 } 2789 x := v_0_0.Args[0] 2790 v.reset(OpAMD64BSFQ) 2791 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) 2792 v0.AuxInt = 1 << 8 2793 v0.AddArg(x) 2794 v.AddArg(v0) 2795 return true 2796 } 2797 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) 2798 // cond: 2799 // result: (BSFQ (ORQconst <t> [1<<16] x)) 2800 for { 2801 v_0 := v.Args[0] 2802 if v_0.Op != OpAMD64ORQconst { 2803 break 2804 } 2805 t := v_0.Type 2806 if v_0.AuxInt != 1<<16 { 2807 break 2808 } 2809 v_0_0 := v_0.Args[0] 2810 if v_0_0.Op != OpAMD64MOVWQZX { 2811 break 2812 } 2813 x := v_0_0.Args[0] 2814 v.reset(OpAMD64BSFQ) 2815 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) 2816 v0.AuxInt = 1 << 16 2817 v0.AddArg(x) 2818 v.AddArg(v0) 2819 return true 2820 } 2821 return false 2822 } 2823 func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool { 2824 // match: (BTQconst [c] x) 2825 // cond: c < 32 2826 // result: (BTLconst [c] x) 2827 for { 2828 c := v.AuxInt 2829 x := v.Args[0] 2830 if !(c < 32) { 2831 break 2832 } 2833 v.reset(OpAMD64BTLconst) 2834 v.AuxInt = c 2835 v.AddArg(x) 2836 return true 2837 } 2838 return false 2839 } 2840 func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool { 2841 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) 2842 // cond: c != 0 2843 // result: x 2844 for { 2845 _ = v.Args[2] 2846 x := v.Args[0] 2847 v_2 := v.Args[2] 2848 if v_2.Op != OpSelect1 { 2849 break 2850 } 2851 v_2_0 := v_2.Args[0] 
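// NOTE (editorial, not generator output): Select1 extracts the flags half of
// BSFQ's (result, flags) tuple. BSF sets ZF only when its operand is zero, and
// an operand of the form (ORQconst [c] _) with c != 0 can never be zero, so
// the EQ condition never holds and the conditional move always yields its
// first operand x.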
2852 if v_2_0.Op != OpAMD64BSFQ { 2853 break 2854 } 2855 v_2_0_0 := v_2_0.Args[0] 2856 if v_2_0_0.Op != OpAMD64ORQconst { 2857 break 2858 } 2859 c := v_2_0_0.AuxInt 2860 if !(c != 0) { 2861 break 2862 } 2863 v.reset(OpCopy) 2864 v.Type = x.Type 2865 v.AddArg(x) 2866 return true 2867 } 2868 return false 2869 } 2870 func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { 2871 b := v.Block 2872 _ = b 2873 // match: (CMPB x (MOVLconst [c])) 2874 // cond: 2875 // result: (CMPBconst x [int64(int8(c))]) 2876 for { 2877 _ = v.Args[1] 2878 x := v.Args[0] 2879 v_1 := v.Args[1] 2880 if v_1.Op != OpAMD64MOVLconst { 2881 break 2882 } 2883 c := v_1.AuxInt 2884 v.reset(OpAMD64CMPBconst) 2885 v.AuxInt = int64(int8(c)) 2886 v.AddArg(x) 2887 return true 2888 } 2889 // match: (CMPB (MOVLconst [c]) x) 2890 // cond: 2891 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 2892 for { 2893 _ = v.Args[1] 2894 v_0 := v.Args[0] 2895 if v_0.Op != OpAMD64MOVLconst { 2896 break 2897 } 2898 c := v_0.AuxInt 2899 x := v.Args[1] 2900 v.reset(OpAMD64InvertFlags) 2901 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 2902 v0.AuxInt = int64(int8(c)) 2903 v0.AddArg(x) 2904 v.AddArg(v0) 2905 return true 2906 } 2907 return false 2908 } 2909 func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool { 2910 // match: (CMPBconst (MOVLconst [x]) [y]) 2911 // cond: int8(x)==int8(y) 2912 // result: (FlagEQ) 2913 for { 2914 y := v.AuxInt 2915 v_0 := v.Args[0] 2916 if v_0.Op != OpAMD64MOVLconst { 2917 break 2918 } 2919 x := v_0.AuxInt 2920 if !(int8(x) == int8(y)) { 2921 break 2922 } 2923 v.reset(OpAMD64FlagEQ) 2924 return true 2925 } 2926 // match: (CMPBconst (MOVLconst [x]) [y]) 2927 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 2928 // result: (FlagLT_ULT) 2929 for { 2930 y := v.AuxInt 2931 v_0 := v.Args[0] 2932 if v_0.Op != OpAMD64MOVLconst { 2933 break 2934 } 2935 x := v_0.AuxInt 2936 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 2937 break 2938 } 2939 v.reset(OpAMD64FlagLT_ULT) 2940 return true 2941 } 2942 // match: (CMPBconst (MOVLconst [x]) [y]) 2943 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 2944 // result: (FlagLT_UGT) 2945 for { 2946 y := v.AuxInt 2947 v_0 := v.Args[0] 2948 if v_0.Op != OpAMD64MOVLconst { 2949 break 2950 } 2951 x := v_0.AuxInt 2952 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 2953 break 2954 } 2955 v.reset(OpAMD64FlagLT_UGT) 2956 return true 2957 } 2958 // match: (CMPBconst (MOVLconst [x]) [y]) 2959 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 2960 // result: (FlagGT_ULT) 2961 for { 2962 y := v.AuxInt 2963 v_0 := v.Args[0] 2964 if v_0.Op != OpAMD64MOVLconst { 2965 break 2966 } 2967 x := v_0.AuxInt 2968 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 2969 break 2970 } 2971 v.reset(OpAMD64FlagGT_ULT) 2972 return true 2973 } 2974 // match: (CMPBconst (MOVLconst [x]) [y]) 2975 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 2976 // result: (FlagGT_UGT) 2977 for { 2978 y := v.AuxInt 2979 v_0 := v.Args[0] 2980 if v_0.Op != OpAMD64MOVLconst { 2981 break 2982 } 2983 x := v_0.AuxInt 2984 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 2985 break 2986 } 2987 v.reset(OpAMD64FlagGT_UGT) 2988 return true 2989 } 2990 // match: (CMPBconst (ANDLconst _ [m]) [n]) 2991 // cond: 0 <= int8(m) && int8(m) < int8(n) 2992 // result: (FlagLT_ULT) 2993 for { 2994 n := v.AuxInt 2995 v_0 := v.Args[0] 2996 if v_0.Op != OpAMD64ANDLconst { 2997 break 2998 } 2999 m := v_0.AuxInt 3000 if !(0 <= int8(m) && int8(m) < int8(n)) { 3001 break 3002 } 3003 v.reset(OpAMD64FlagLT_ULT) 3004 return true 3005 } 3006 // match: 
(CMPBconst (ANDL x y) [0]) 3007 // cond: 3008 // result: (TESTB x y) 3009 for { 3010 if v.AuxInt != 0 { 3011 break 3012 } 3013 v_0 := v.Args[0] 3014 if v_0.Op != OpAMD64ANDL { 3015 break 3016 } 3017 _ = v_0.Args[1] 3018 x := v_0.Args[0] 3019 y := v_0.Args[1] 3020 v.reset(OpAMD64TESTB) 3021 v.AddArg(x) 3022 v.AddArg(y) 3023 return true 3024 } 3025 // match: (CMPBconst (ANDLconst [c] x) [0]) 3026 // cond: 3027 // result: (TESTBconst [int64(int8(c))] x) 3028 for { 3029 if v.AuxInt != 0 { 3030 break 3031 } 3032 v_0 := v.Args[0] 3033 if v_0.Op != OpAMD64ANDLconst { 3034 break 3035 } 3036 c := v_0.AuxInt 3037 x := v_0.Args[0] 3038 v.reset(OpAMD64TESTBconst) 3039 v.AuxInt = int64(int8(c)) 3040 v.AddArg(x) 3041 return true 3042 } 3043 // match: (CMPBconst x [0]) 3044 // cond: 3045 // result: (TESTB x x) 3046 for { 3047 if v.AuxInt != 0 { 3048 break 3049 } 3050 x := v.Args[0] 3051 v.reset(OpAMD64TESTB) 3052 v.AddArg(x) 3053 v.AddArg(x) 3054 return true 3055 } 3056 return false 3057 } 3058 func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { 3059 b := v.Block 3060 _ = b 3061 // match: (CMPL x (MOVLconst [c])) 3062 // cond: 3063 // result: (CMPLconst x [c]) 3064 for { 3065 _ = v.Args[1] 3066 x := v.Args[0] 3067 v_1 := v.Args[1] 3068 if v_1.Op != OpAMD64MOVLconst { 3069 break 3070 } 3071 c := v_1.AuxInt 3072 v.reset(OpAMD64CMPLconst) 3073 v.AuxInt = c 3074 v.AddArg(x) 3075 return true 3076 } 3077 // match: (CMPL (MOVLconst [c]) x) 3078 // cond: 3079 // result: (InvertFlags (CMPLconst x [c])) 3080 for { 3081 _ = v.Args[1] 3082 v_0 := v.Args[0] 3083 if v_0.Op != OpAMD64MOVLconst { 3084 break 3085 } 3086 c := v_0.AuxInt 3087 x := v.Args[1] 3088 v.reset(OpAMD64InvertFlags) 3089 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 3090 v0.AuxInt = c 3091 v0.AddArg(x) 3092 v.AddArg(v0) 3093 return true 3094 } 3095 return false 3096 } 3097 func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool { 3098 // match: (CMPLconst (MOVLconst [x]) [y]) 3099 // cond: int32(x)==int32(y) 3100 // result: (FlagEQ) 3101 for { 3102 y := v.AuxInt 3103 v_0 := v.Args[0] 3104 if v_0.Op != OpAMD64MOVLconst { 3105 break 3106 } 3107 x := v_0.AuxInt 3108 if !(int32(x) == int32(y)) { 3109 break 3110 } 3111 v.reset(OpAMD64FlagEQ) 3112 return true 3113 } 3114 // match: (CMPLconst (MOVLconst [x]) [y]) 3115 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 3116 // result: (FlagLT_ULT) 3117 for { 3118 y := v.AuxInt 3119 v_0 := v.Args[0] 3120 if v_0.Op != OpAMD64MOVLconst { 3121 break 3122 } 3123 x := v_0.AuxInt 3124 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 3125 break 3126 } 3127 v.reset(OpAMD64FlagLT_ULT) 3128 return true 3129 } 3130 // match: (CMPLconst (MOVLconst [x]) [y]) 3131 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 3132 // result: (FlagLT_UGT) 3133 for { 3134 y := v.AuxInt 3135 v_0 := v.Args[0] 3136 if v_0.Op != OpAMD64MOVLconst { 3137 break 3138 } 3139 x := v_0.AuxInt 3140 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 3141 break 3142 } 3143 v.reset(OpAMD64FlagLT_UGT) 3144 return true 3145 } 3146 // match: (CMPLconst (MOVLconst [x]) [y]) 3147 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 3148 // result: (FlagGT_ULT) 3149 for { 3150 y := v.AuxInt 3151 v_0 := v.Args[0] 3152 if v_0.Op != OpAMD64MOVLconst { 3153 break 3154 } 3155 x := v_0.AuxInt 3156 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 3157 break 3158 } 3159 v.reset(OpAMD64FlagGT_ULT) 3160 return true 3161 } 3162 // match: (CMPLconst (MOVLconst [x]) [y]) 3163 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 3164 // result: 
(FlagGT_UGT) 3165 for { 3166 y := v.AuxInt 3167 v_0 := v.Args[0] 3168 if v_0.Op != OpAMD64MOVLconst { 3169 break 3170 } 3171 x := v_0.AuxInt 3172 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 3173 break 3174 } 3175 v.reset(OpAMD64FlagGT_UGT) 3176 return true 3177 } 3178 // match: (CMPLconst (SHRLconst _ [c]) [n]) 3179 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 3180 // result: (FlagLT_ULT) 3181 for { 3182 n := v.AuxInt 3183 v_0 := v.Args[0] 3184 if v_0.Op != OpAMD64SHRLconst { 3185 break 3186 } 3187 c := v_0.AuxInt 3188 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 3189 break 3190 } 3191 v.reset(OpAMD64FlagLT_ULT) 3192 return true 3193 } 3194 // match: (CMPLconst (ANDLconst _ [m]) [n]) 3195 // cond: 0 <= int32(m) && int32(m) < int32(n) 3196 // result: (FlagLT_ULT) 3197 for { 3198 n := v.AuxInt 3199 v_0 := v.Args[0] 3200 if v_0.Op != OpAMD64ANDLconst { 3201 break 3202 } 3203 m := v_0.AuxInt 3204 if !(0 <= int32(m) && int32(m) < int32(n)) { 3205 break 3206 } 3207 v.reset(OpAMD64FlagLT_ULT) 3208 return true 3209 } 3210 // match: (CMPLconst (ANDL x y) [0]) 3211 // cond: 3212 // result: (TESTL x y) 3213 for { 3214 if v.AuxInt != 0 { 3215 break 3216 } 3217 v_0 := v.Args[0] 3218 if v_0.Op != OpAMD64ANDL { 3219 break 3220 } 3221 _ = v_0.Args[1] 3222 x := v_0.Args[0] 3223 y := v_0.Args[1] 3224 v.reset(OpAMD64TESTL) 3225 v.AddArg(x) 3226 v.AddArg(y) 3227 return true 3228 } 3229 // match: (CMPLconst (ANDLconst [c] x) [0]) 3230 // cond: 3231 // result: (TESTLconst [c] x) 3232 for { 3233 if v.AuxInt != 0 { 3234 break 3235 } 3236 v_0 := v.Args[0] 3237 if v_0.Op != OpAMD64ANDLconst { 3238 break 3239 } 3240 c := v_0.AuxInt 3241 x := v_0.Args[0] 3242 v.reset(OpAMD64TESTLconst) 3243 v.AuxInt = c 3244 v.AddArg(x) 3245 return true 3246 } 3247 // match: (CMPLconst x [0]) 3248 // cond: 3249 // result: (TESTL x x) 3250 for { 3251 if v.AuxInt != 0 { 3252 break 3253 } 3254 x := v.Args[0] 3255 v.reset(OpAMD64TESTL) 3256 v.AddArg(x) 3257 v.AddArg(x) 3258 return true 3259 } 3260 return false 3261 } 3262 func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool { 3263 b := v.Block 3264 _ = b 3265 // match: (CMPQ x (MOVQconst [c])) 3266 // cond: is32Bit(c) 3267 // result: (CMPQconst x [c]) 3268 for { 3269 _ = v.Args[1] 3270 x := v.Args[0] 3271 v_1 := v.Args[1] 3272 if v_1.Op != OpAMD64MOVQconst { 3273 break 3274 } 3275 c := v_1.AuxInt 3276 if !(is32Bit(c)) { 3277 break 3278 } 3279 v.reset(OpAMD64CMPQconst) 3280 v.AuxInt = c 3281 v.AddArg(x) 3282 return true 3283 } 3284 // match: (CMPQ (MOVQconst [c]) x) 3285 // cond: is32Bit(c) 3286 // result: (InvertFlags (CMPQconst x [c])) 3287 for { 3288 _ = v.Args[1] 3289 v_0 := v.Args[0] 3290 if v_0.Op != OpAMD64MOVQconst { 3291 break 3292 } 3293 c := v_0.AuxInt 3294 x := v.Args[1] 3295 if !(is32Bit(c)) { 3296 break 3297 } 3298 v.reset(OpAMD64InvertFlags) 3299 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 3300 v0.AuxInt = c 3301 v0.AddArg(x) 3302 v.AddArg(v0) 3303 return true 3304 } 3305 return false 3306 } 3307 func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool { 3308 // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) 3309 // cond: 3310 // result: (FlagLT_ULT) 3311 for { 3312 if v.AuxInt != 32 { 3313 break 3314 } 3315 v_0 := v.Args[0] 3316 if v_0.Op != OpAMD64NEGQ { 3317 break 3318 } 3319 v_0_0 := v_0.Args[0] 3320 if v_0_0.Op != OpAMD64ADDQconst { 3321 break 3322 } 3323 if v_0_0.AuxInt != -16 { 3324 break 3325 } 3326 v_0_0_0 := v_0_0.Args[0] 3327 if v_0_0_0.Op != OpAMD64ANDQconst { 3328 
break 3329 } 3330 if v_0_0_0.AuxInt != 15 { 3331 break 3332 } 3333 v.reset(OpAMD64FlagLT_ULT) 3334 return true 3335 } 3336 // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) 3337 // cond: 3338 // result: (FlagLT_ULT) 3339 for { 3340 if v.AuxInt != 32 { 3341 break 3342 } 3343 v_0 := v.Args[0] 3344 if v_0.Op != OpAMD64NEGQ { 3345 break 3346 } 3347 v_0_0 := v_0.Args[0] 3348 if v_0_0.Op != OpAMD64ADDQconst { 3349 break 3350 } 3351 if v_0_0.AuxInt != -8 { 3352 break 3353 } 3354 v_0_0_0 := v_0_0.Args[0] 3355 if v_0_0_0.Op != OpAMD64ANDQconst { 3356 break 3357 } 3358 if v_0_0_0.AuxInt != 7 { 3359 break 3360 } 3361 v.reset(OpAMD64FlagLT_ULT) 3362 return true 3363 } 3364 // match: (CMPQconst (MOVQconst [x]) [y]) 3365 // cond: x==y 3366 // result: (FlagEQ) 3367 for { 3368 y := v.AuxInt 3369 v_0 := v.Args[0] 3370 if v_0.Op != OpAMD64MOVQconst { 3371 break 3372 } 3373 x := v_0.AuxInt 3374 if !(x == y) { 3375 break 3376 } 3377 v.reset(OpAMD64FlagEQ) 3378 return true 3379 } 3380 // match: (CMPQconst (MOVQconst [x]) [y]) 3381 // cond: x<y && uint64(x)<uint64(y) 3382 // result: (FlagLT_ULT) 3383 for { 3384 y := v.AuxInt 3385 v_0 := v.Args[0] 3386 if v_0.Op != OpAMD64MOVQconst { 3387 break 3388 } 3389 x := v_0.AuxInt 3390 if !(x < y && uint64(x) < uint64(y)) { 3391 break 3392 } 3393 v.reset(OpAMD64FlagLT_ULT) 3394 return true 3395 } 3396 // match: (CMPQconst (MOVQconst [x]) [y]) 3397 // cond: x<y && uint64(x)>uint64(y) 3398 // result: (FlagLT_UGT) 3399 for { 3400 y := v.AuxInt 3401 v_0 := v.Args[0] 3402 if v_0.Op != OpAMD64MOVQconst { 3403 break 3404 } 3405 x := v_0.AuxInt 3406 if !(x < y && uint64(x) > uint64(y)) { 3407 break 3408 } 3409 v.reset(OpAMD64FlagLT_UGT) 3410 return true 3411 } 3412 // match: (CMPQconst (MOVQconst [x]) [y]) 3413 // cond: x>y && uint64(x)<uint64(y) 3414 // result: (FlagGT_ULT) 3415 for { 3416 y := v.AuxInt 3417 v_0 := v.Args[0] 3418 if v_0.Op != OpAMD64MOVQconst { 3419 break 3420 } 3421 x := v_0.AuxInt 3422 if !(x > y && uint64(x) < uint64(y)) { 3423 break 3424 } 3425 v.reset(OpAMD64FlagGT_ULT) 3426 return true 3427 } 3428 // match: (CMPQconst (MOVQconst [x]) [y]) 3429 // cond: x>y && uint64(x)>uint64(y) 3430 // result: (FlagGT_UGT) 3431 for { 3432 y := v.AuxInt 3433 v_0 := v.Args[0] 3434 if v_0.Op != OpAMD64MOVQconst { 3435 break 3436 } 3437 x := v_0.AuxInt 3438 if !(x > y && uint64(x) > uint64(y)) { 3439 break 3440 } 3441 v.reset(OpAMD64FlagGT_UGT) 3442 return true 3443 } 3444 // match: (CMPQconst (MOVBQZX _) [c]) 3445 // cond: 0xFF < c 3446 // result: (FlagLT_ULT) 3447 for { 3448 c := v.AuxInt 3449 v_0 := v.Args[0] 3450 if v_0.Op != OpAMD64MOVBQZX { 3451 break 3452 } 3453 if !(0xFF < c) { 3454 break 3455 } 3456 v.reset(OpAMD64FlagLT_ULT) 3457 return true 3458 } 3459 // match: (CMPQconst (MOVWQZX _) [c]) 3460 // cond: 0xFFFF < c 3461 // result: (FlagLT_ULT) 3462 for { 3463 c := v.AuxInt 3464 v_0 := v.Args[0] 3465 if v_0.Op != OpAMD64MOVWQZX { 3466 break 3467 } 3468 if !(0xFFFF < c) { 3469 break 3470 } 3471 v.reset(OpAMD64FlagLT_ULT) 3472 return true 3473 } 3474 // match: (CMPQconst (MOVLQZX _) [c]) 3475 // cond: 0xFFFFFFFF < c 3476 // result: (FlagLT_ULT) 3477 for { 3478 c := v.AuxInt 3479 v_0 := v.Args[0] 3480 if v_0.Op != OpAMD64MOVLQZX { 3481 break 3482 } 3483 if !(0xFFFFFFFF < c) { 3484 break 3485 } 3486 v.reset(OpAMD64FlagLT_ULT) 3487 return true 3488 } 3489 return false 3490 } 3491 func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool { 3492 // match: (CMPQconst (SHRQconst _ [c]) [n]) 3493 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= 
uint64(n) 3494 // result: (FlagLT_ULT) 3495 for { 3496 n := v.AuxInt 3497 v_0 := v.Args[0] 3498 if v_0.Op != OpAMD64SHRQconst { 3499 break 3500 } 3501 c := v_0.AuxInt 3502 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 3503 break 3504 } 3505 v.reset(OpAMD64FlagLT_ULT) 3506 return true 3507 } 3508 // match: (CMPQconst (ANDQconst _ [m]) [n]) 3509 // cond: 0 <= m && m < n 3510 // result: (FlagLT_ULT) 3511 for { 3512 n := v.AuxInt 3513 v_0 := v.Args[0] 3514 if v_0.Op != OpAMD64ANDQconst { 3515 break 3516 } 3517 m := v_0.AuxInt 3518 if !(0 <= m && m < n) { 3519 break 3520 } 3521 v.reset(OpAMD64FlagLT_ULT) 3522 return true 3523 } 3524 // match: (CMPQconst (ANDLconst _ [m]) [n]) 3525 // cond: 0 <= m && m < n 3526 // result: (FlagLT_ULT) 3527 for { 3528 n := v.AuxInt 3529 v_0 := v.Args[0] 3530 if v_0.Op != OpAMD64ANDLconst { 3531 break 3532 } 3533 m := v_0.AuxInt 3534 if !(0 <= m && m < n) { 3535 break 3536 } 3537 v.reset(OpAMD64FlagLT_ULT) 3538 return true 3539 } 3540 // match: (CMPQconst (ANDQ x y) [0]) 3541 // cond: 3542 // result: (TESTQ x y) 3543 for { 3544 if v.AuxInt != 0 { 3545 break 3546 } 3547 v_0 := v.Args[0] 3548 if v_0.Op != OpAMD64ANDQ { 3549 break 3550 } 3551 _ = v_0.Args[1] 3552 x := v_0.Args[0] 3553 y := v_0.Args[1] 3554 v.reset(OpAMD64TESTQ) 3555 v.AddArg(x) 3556 v.AddArg(y) 3557 return true 3558 } 3559 // match: (CMPQconst (ANDQconst [c] x) [0]) 3560 // cond: 3561 // result: (TESTQconst [c] x) 3562 for { 3563 if v.AuxInt != 0 { 3564 break 3565 } 3566 v_0 := v.Args[0] 3567 if v_0.Op != OpAMD64ANDQconst { 3568 break 3569 } 3570 c := v_0.AuxInt 3571 x := v_0.Args[0] 3572 v.reset(OpAMD64TESTQconst) 3573 v.AuxInt = c 3574 v.AddArg(x) 3575 return true 3576 } 3577 // match: (CMPQconst x [0]) 3578 // cond: 3579 // result: (TESTQ x x) 3580 for { 3581 if v.AuxInt != 0 { 3582 break 3583 } 3584 x := v.Args[0] 3585 v.reset(OpAMD64TESTQ) 3586 v.AddArg(x) 3587 v.AddArg(x) 3588 return true 3589 } 3590 return false 3591 } 3592 func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { 3593 b := v.Block 3594 _ = b 3595 // match: (CMPW x (MOVLconst [c])) 3596 // cond: 3597 // result: (CMPWconst x [int64(int16(c))]) 3598 for { 3599 _ = v.Args[1] 3600 x := v.Args[0] 3601 v_1 := v.Args[1] 3602 if v_1.Op != OpAMD64MOVLconst { 3603 break 3604 } 3605 c := v_1.AuxInt 3606 v.reset(OpAMD64CMPWconst) 3607 v.AuxInt = int64(int16(c)) 3608 v.AddArg(x) 3609 return true 3610 } 3611 // match: (CMPW (MOVLconst [c]) x) 3612 // cond: 3613 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 3614 for { 3615 _ = v.Args[1] 3616 v_0 := v.Args[0] 3617 if v_0.Op != OpAMD64MOVLconst { 3618 break 3619 } 3620 c := v_0.AuxInt 3621 x := v.Args[1] 3622 v.reset(OpAMD64InvertFlags) 3623 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 3624 v0.AuxInt = int64(int16(c)) 3625 v0.AddArg(x) 3626 v.AddArg(v0) 3627 return true 3628 } 3629 return false 3630 } 3631 func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool { 3632 // match: (CMPWconst (MOVLconst [x]) [y]) 3633 // cond: int16(x)==int16(y) 3634 // result: (FlagEQ) 3635 for { 3636 y := v.AuxInt 3637 v_0 := v.Args[0] 3638 if v_0.Op != OpAMD64MOVLconst { 3639 break 3640 } 3641 x := v_0.AuxInt 3642 if !(int16(x) == int16(y)) { 3643 break 3644 } 3645 v.reset(OpAMD64FlagEQ) 3646 return true 3647 } 3648 // match: (CMPWconst (MOVLconst [x]) [y]) 3649 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 3650 // result: (FlagLT_ULT) 3651 for { 3652 y := v.AuxInt 3653 v_0 := v.Args[0] 3654 if v_0.Op != OpAMD64MOVLconst { 3655 break 3656 } 3657 x := v_0.AuxInt 
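// NOTE (editorial, not generator output): AuxInt holds a sign-extended 64-bit
// value, so the sub-word comparisons here truncate both sides first. The
// signed and unsigned orderings are checked separately because they select
// different flag constants; for example, with x = -1 and y = 1:
//
//	int16(-1) < int16(1), but uint16(0xFFFF) > uint16(1) -> FlagLT_UGT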
3658 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 3659 break 3660 } 3661 v.reset(OpAMD64FlagLT_ULT) 3662 return true 3663 } 3664 // match: (CMPWconst (MOVLconst [x]) [y]) 3665 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 3666 // result: (FlagLT_UGT) 3667 for { 3668 y := v.AuxInt 3669 v_0 := v.Args[0] 3670 if v_0.Op != OpAMD64MOVLconst { 3671 break 3672 } 3673 x := v_0.AuxInt 3674 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 3675 break 3676 } 3677 v.reset(OpAMD64FlagLT_UGT) 3678 return true 3679 } 3680 // match: (CMPWconst (MOVLconst [x]) [y]) 3681 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 3682 // result: (FlagGT_ULT) 3683 for { 3684 y := v.AuxInt 3685 v_0 := v.Args[0] 3686 if v_0.Op != OpAMD64MOVLconst { 3687 break 3688 } 3689 x := v_0.AuxInt 3690 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 3691 break 3692 } 3693 v.reset(OpAMD64FlagGT_ULT) 3694 return true 3695 } 3696 // match: (CMPWconst (MOVLconst [x]) [y]) 3697 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 3698 // result: (FlagGT_UGT) 3699 for { 3700 y := v.AuxInt 3701 v_0 := v.Args[0] 3702 if v_0.Op != OpAMD64MOVLconst { 3703 break 3704 } 3705 x := v_0.AuxInt 3706 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 3707 break 3708 } 3709 v.reset(OpAMD64FlagGT_UGT) 3710 return true 3711 } 3712 // match: (CMPWconst (ANDLconst _ [m]) [n]) 3713 // cond: 0 <= int16(m) && int16(m) < int16(n) 3714 // result: (FlagLT_ULT) 3715 for { 3716 n := v.AuxInt 3717 v_0 := v.Args[0] 3718 if v_0.Op != OpAMD64ANDLconst { 3719 break 3720 } 3721 m := v_0.AuxInt 3722 if !(0 <= int16(m) && int16(m) < int16(n)) { 3723 break 3724 } 3725 v.reset(OpAMD64FlagLT_ULT) 3726 return true 3727 } 3728 // match: (CMPWconst (ANDL x y) [0]) 3729 // cond: 3730 // result: (TESTW x y) 3731 for { 3732 if v.AuxInt != 0 { 3733 break 3734 } 3735 v_0 := v.Args[0] 3736 if v_0.Op != OpAMD64ANDL { 3737 break 3738 } 3739 _ = v_0.Args[1] 3740 x := v_0.Args[0] 3741 y := v_0.Args[1] 3742 v.reset(OpAMD64TESTW) 3743 v.AddArg(x) 3744 v.AddArg(y) 3745 return true 3746 } 3747 // match: (CMPWconst (ANDLconst [c] x) [0]) 3748 // cond: 3749 // result: (TESTWconst [int64(int16(c))] x) 3750 for { 3751 if v.AuxInt != 0 { 3752 break 3753 } 3754 v_0 := v.Args[0] 3755 if v_0.Op != OpAMD64ANDLconst { 3756 break 3757 } 3758 c := v_0.AuxInt 3759 x := v_0.Args[0] 3760 v.reset(OpAMD64TESTWconst) 3761 v.AuxInt = int64(int16(c)) 3762 v.AddArg(x) 3763 return true 3764 } 3765 // match: (CMPWconst x [0]) 3766 // cond: 3767 // result: (TESTW x x) 3768 for { 3769 if v.AuxInt != 0 { 3770 break 3771 } 3772 x := v.Args[0] 3773 v.reset(OpAMD64TESTW) 3774 v.AddArg(x) 3775 v.AddArg(x) 3776 return true 3777 } 3778 return false 3779 } 3780 func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool { 3781 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 3782 // cond: is32Bit(off1+off2) 3783 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 3784 for { 3785 off1 := v.AuxInt 3786 sym := v.Aux 3787 _ = v.Args[3] 3788 v_0 := v.Args[0] 3789 if v_0.Op != OpAMD64ADDQconst { 3790 break 3791 } 3792 off2 := v_0.AuxInt 3793 ptr := v_0.Args[0] 3794 old := v.Args[1] 3795 new_ := v.Args[2] 3796 mem := v.Args[3] 3797 if !(is32Bit(off1 + off2)) { 3798 break 3799 } 3800 v.reset(OpAMD64CMPXCHGLlock) 3801 v.AuxInt = off1 + off2 3802 v.Aux = sym 3803 v.AddArg(ptr) 3804 v.AddArg(old) 3805 v.AddArg(new_) 3806 v.AddArg(mem) 3807 return true 3808 } 3809 return false 3810 } 3811 func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool { 3812 // match: (CMPXCHGQlock 
[off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 3813 // cond: is32Bit(off1+off2) 3814 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 3815 for { 3816 off1 := v.AuxInt 3817 sym := v.Aux 3818 _ = v.Args[3] 3819 v_0 := v.Args[0] 3820 if v_0.Op != OpAMD64ADDQconst { 3821 break 3822 } 3823 off2 := v_0.AuxInt 3824 ptr := v_0.Args[0] 3825 old := v.Args[1] 3826 new_ := v.Args[2] 3827 mem := v.Args[3] 3828 if !(is32Bit(off1 + off2)) { 3829 break 3830 } 3831 v.reset(OpAMD64CMPXCHGQlock) 3832 v.AuxInt = off1 + off2 3833 v.Aux = sym 3834 v.AddArg(ptr) 3835 v.AddArg(old) 3836 v.AddArg(new_) 3837 v.AddArg(mem) 3838 return true 3839 } 3840 return false 3841 } 3842 func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool { 3843 // match: (LEAL [c] {s} (ADDLconst [d] x)) 3844 // cond: is32Bit(c+d) 3845 // result: (LEAL [c+d] {s} x) 3846 for { 3847 c := v.AuxInt 3848 s := v.Aux 3849 v_0 := v.Args[0] 3850 if v_0.Op != OpAMD64ADDLconst { 3851 break 3852 } 3853 d := v_0.AuxInt 3854 x := v_0.Args[0] 3855 if !(is32Bit(c + d)) { 3856 break 3857 } 3858 v.reset(OpAMD64LEAL) 3859 v.AuxInt = c + d 3860 v.Aux = s 3861 v.AddArg(x) 3862 return true 3863 } 3864 return false 3865 } 3866 func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool { 3867 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 3868 // cond: is32Bit(c+d) 3869 // result: (LEAQ [c+d] {s} x) 3870 for { 3871 c := v.AuxInt 3872 s := v.Aux 3873 v_0 := v.Args[0] 3874 if v_0.Op != OpAMD64ADDQconst { 3875 break 3876 } 3877 d := v_0.AuxInt 3878 x := v_0.Args[0] 3879 if !(is32Bit(c + d)) { 3880 break 3881 } 3882 v.reset(OpAMD64LEAQ) 3883 v.AuxInt = c + d 3884 v.Aux = s 3885 v.AddArg(x) 3886 return true 3887 } 3888 // match: (LEAQ [c] {s} (ADDQ x y)) 3889 // cond: x.Op != OpSB && y.Op != OpSB 3890 // result: (LEAQ1 [c] {s} x y) 3891 for { 3892 c := v.AuxInt 3893 s := v.Aux 3894 v_0 := v.Args[0] 3895 if v_0.Op != OpAMD64ADDQ { 3896 break 3897 } 3898 _ = v_0.Args[1] 3899 x := v_0.Args[0] 3900 y := v_0.Args[1] 3901 if !(x.Op != OpSB && y.Op != OpSB) { 3902 break 3903 } 3904 v.reset(OpAMD64LEAQ1) 3905 v.AuxInt = c 3906 v.Aux = s 3907 v.AddArg(x) 3908 v.AddArg(y) 3909 return true 3910 } 3911 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 3912 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3913 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 3914 for { 3915 off1 := v.AuxInt 3916 sym1 := v.Aux 3917 v_0 := v.Args[0] 3918 if v_0.Op != OpAMD64LEAQ { 3919 break 3920 } 3921 off2 := v_0.AuxInt 3922 sym2 := v_0.Aux 3923 x := v_0.Args[0] 3924 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3925 break 3926 } 3927 v.reset(OpAMD64LEAQ) 3928 v.AuxInt = off1 + off2 3929 v.Aux = mergeSym(sym1, sym2) 3930 v.AddArg(x) 3931 return true 3932 } 3933 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 3934 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3935 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3936 for { 3937 off1 := v.AuxInt 3938 sym1 := v.Aux 3939 v_0 := v.Args[0] 3940 if v_0.Op != OpAMD64LEAQ1 { 3941 break 3942 } 3943 off2 := v_0.AuxInt 3944 sym2 := v_0.Aux 3945 _ = v_0.Args[1] 3946 x := v_0.Args[0] 3947 y := v_0.Args[1] 3948 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3949 break 3950 } 3951 v.reset(OpAMD64LEAQ1) 3952 v.AuxInt = off1 + off2 3953 v.Aux = mergeSym(sym1, sym2) 3954 v.AddArg(x) 3955 v.AddArg(y) 3956 return true 3957 } 3958 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 3959 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3960 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3961 for 
{ 3962 off1 := v.AuxInt 3963 sym1 := v.Aux 3964 v_0 := v.Args[0] 3965 if v_0.Op != OpAMD64LEAQ2 { 3966 break 3967 } 3968 off2 := v_0.AuxInt 3969 sym2 := v_0.Aux 3970 _ = v_0.Args[1] 3971 x := v_0.Args[0] 3972 y := v_0.Args[1] 3973 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3974 break 3975 } 3976 v.reset(OpAMD64LEAQ2) 3977 v.AuxInt = off1 + off2 3978 v.Aux = mergeSym(sym1, sym2) 3979 v.AddArg(x) 3980 v.AddArg(y) 3981 return true 3982 } 3983 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 3984 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3985 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 3986 for { 3987 off1 := v.AuxInt 3988 sym1 := v.Aux 3989 v_0 := v.Args[0] 3990 if v_0.Op != OpAMD64LEAQ4 { 3991 break 3992 } 3993 off2 := v_0.AuxInt 3994 sym2 := v_0.Aux 3995 _ = v_0.Args[1] 3996 x := v_0.Args[0] 3997 y := v_0.Args[1] 3998 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3999 break 4000 } 4001 v.reset(OpAMD64LEAQ4) 4002 v.AuxInt = off1 + off2 4003 v.Aux = mergeSym(sym1, sym2) 4004 v.AddArg(x) 4005 v.AddArg(y) 4006 return true 4007 } 4008 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 4009 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4010 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 4011 for { 4012 off1 := v.AuxInt 4013 sym1 := v.Aux 4014 v_0 := v.Args[0] 4015 if v_0.Op != OpAMD64LEAQ8 { 4016 break 4017 } 4018 off2 := v_0.AuxInt 4019 sym2 := v_0.Aux 4020 _ = v_0.Args[1] 4021 x := v_0.Args[0] 4022 y := v_0.Args[1] 4023 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4024 break 4025 } 4026 v.reset(OpAMD64LEAQ8) 4027 v.AuxInt = off1 + off2 4028 v.Aux = mergeSym(sym1, sym2) 4029 v.AddArg(x) 4030 v.AddArg(y) 4031 return true 4032 } 4033 return false 4034 } 4035 func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool { 4036 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 4037 // cond: is32Bit(c+d) && x.Op != OpSB 4038 // result: (LEAQ1 [c+d] {s} x y) 4039 for { 4040 c := v.AuxInt 4041 s := v.Aux 4042 _ = v.Args[1] 4043 v_0 := v.Args[0] 4044 if v_0.Op != OpAMD64ADDQconst { 4045 break 4046 } 4047 d := v_0.AuxInt 4048 x := v_0.Args[0] 4049 y := v.Args[1] 4050 if !(is32Bit(c+d) && x.Op != OpSB) { 4051 break 4052 } 4053 v.reset(OpAMD64LEAQ1) 4054 v.AuxInt = c + d 4055 v.Aux = s 4056 v.AddArg(x) 4057 v.AddArg(y) 4058 return true 4059 } 4060 // match: (LEAQ1 [c] {s} y (ADDQconst [d] x)) 4061 // cond: is32Bit(c+d) && x.Op != OpSB 4062 // result: (LEAQ1 [c+d] {s} x y) 4063 for { 4064 c := v.AuxInt 4065 s := v.Aux 4066 _ = v.Args[1] 4067 y := v.Args[0] 4068 v_1 := v.Args[1] 4069 if v_1.Op != OpAMD64ADDQconst { 4070 break 4071 } 4072 d := v_1.AuxInt 4073 x := v_1.Args[0] 4074 if !(is32Bit(c+d) && x.Op != OpSB) { 4075 break 4076 } 4077 v.reset(OpAMD64LEAQ1) 4078 v.AuxInt = c + d 4079 v.Aux = s 4080 v.AddArg(x) 4081 v.AddArg(y) 4082 return true 4083 } 4084 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 4085 // cond: 4086 // result: (LEAQ2 [c] {s} x y) 4087 for { 4088 c := v.AuxInt 4089 s := v.Aux 4090 _ = v.Args[1] 4091 x := v.Args[0] 4092 v_1 := v.Args[1] 4093 if v_1.Op != OpAMD64SHLQconst { 4094 break 4095 } 4096 if v_1.AuxInt != 1 { 4097 break 4098 } 4099 y := v_1.Args[0] 4100 v.reset(OpAMD64LEAQ2) 4101 v.AuxInt = c 4102 v.Aux = s 4103 v.AddArg(x) 4104 v.AddArg(y) 4105 return true 4106 } 4107 // match: (LEAQ1 [c] {s} (SHLQconst [1] y) x) 4108 // cond: 4109 // result: (LEAQ2 [c] {s} x y) 4110 for { 4111 c := v.AuxInt 4112 s := v.Aux 4113 _ = v.Args[1] 4114 v_0 := v.Args[0] 4115 if v_0.Op != OpAMD64SHLQconst { 4116 break 4117 } 4118 
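// NOTE (editorial, not generator output): x + (y << 1) equals x + 2*y, so a
// LEAQ1 whose index is shifted left by 1 becomes a LEAQ2, encoding the scale
// in the addressing mode for free; the neighboring rules do the same for
// shifts by 2 and 3, producing LEAQ4 and LEAQ8.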
if v_0.AuxInt != 1 { 4119 break 4120 } 4121 y := v_0.Args[0] 4122 x := v.Args[1] 4123 v.reset(OpAMD64LEAQ2) 4124 v.AuxInt = c 4125 v.Aux = s 4126 v.AddArg(x) 4127 v.AddArg(y) 4128 return true 4129 } 4130 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 4131 // cond: 4132 // result: (LEAQ4 [c] {s} x y) 4133 for { 4134 c := v.AuxInt 4135 s := v.Aux 4136 _ = v.Args[1] 4137 x := v.Args[0] 4138 v_1 := v.Args[1] 4139 if v_1.Op != OpAMD64SHLQconst { 4140 break 4141 } 4142 if v_1.AuxInt != 2 { 4143 break 4144 } 4145 y := v_1.Args[0] 4146 v.reset(OpAMD64LEAQ4) 4147 v.AuxInt = c 4148 v.Aux = s 4149 v.AddArg(x) 4150 v.AddArg(y) 4151 return true 4152 } 4153 // match: (LEAQ1 [c] {s} (SHLQconst [2] y) x) 4154 // cond: 4155 // result: (LEAQ4 [c] {s} x y) 4156 for { 4157 c := v.AuxInt 4158 s := v.Aux 4159 _ = v.Args[1] 4160 v_0 := v.Args[0] 4161 if v_0.Op != OpAMD64SHLQconst { 4162 break 4163 } 4164 if v_0.AuxInt != 2 { 4165 break 4166 } 4167 y := v_0.Args[0] 4168 x := v.Args[1] 4169 v.reset(OpAMD64LEAQ4) 4170 v.AuxInt = c 4171 v.Aux = s 4172 v.AddArg(x) 4173 v.AddArg(y) 4174 return true 4175 } 4176 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 4177 // cond: 4178 // result: (LEAQ8 [c] {s} x y) 4179 for { 4180 c := v.AuxInt 4181 s := v.Aux 4182 _ = v.Args[1] 4183 x := v.Args[0] 4184 v_1 := v.Args[1] 4185 if v_1.Op != OpAMD64SHLQconst { 4186 break 4187 } 4188 if v_1.AuxInt != 3 { 4189 break 4190 } 4191 y := v_1.Args[0] 4192 v.reset(OpAMD64LEAQ8) 4193 v.AuxInt = c 4194 v.Aux = s 4195 v.AddArg(x) 4196 v.AddArg(y) 4197 return true 4198 } 4199 // match: (LEAQ1 [c] {s} (SHLQconst [3] y) x) 4200 // cond: 4201 // result: (LEAQ8 [c] {s} x y) 4202 for { 4203 c := v.AuxInt 4204 s := v.Aux 4205 _ = v.Args[1] 4206 v_0 := v.Args[0] 4207 if v_0.Op != OpAMD64SHLQconst { 4208 break 4209 } 4210 if v_0.AuxInt != 3 { 4211 break 4212 } 4213 y := v_0.Args[0] 4214 x := v.Args[1] 4215 v.reset(OpAMD64LEAQ8) 4216 v.AuxInt = c 4217 v.Aux = s 4218 v.AddArg(x) 4219 v.AddArg(y) 4220 return true 4221 } 4222 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4223 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4224 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 4225 for { 4226 off1 := v.AuxInt 4227 sym1 := v.Aux 4228 _ = v.Args[1] 4229 v_0 := v.Args[0] 4230 if v_0.Op != OpAMD64LEAQ { 4231 break 4232 } 4233 off2 := v_0.AuxInt 4234 sym2 := v_0.Aux 4235 x := v_0.Args[0] 4236 y := v.Args[1] 4237 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4238 break 4239 } 4240 v.reset(OpAMD64LEAQ1) 4241 v.AuxInt = off1 + off2 4242 v.Aux = mergeSym(sym1, sym2) 4243 v.AddArg(x) 4244 v.AddArg(y) 4245 return true 4246 } 4247 // match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x)) 4248 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4249 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 4250 for { 4251 off1 := v.AuxInt 4252 sym1 := v.Aux 4253 _ = v.Args[1] 4254 y := v.Args[0] 4255 v_1 := v.Args[1] 4256 if v_1.Op != OpAMD64LEAQ { 4257 break 4258 } 4259 off2 := v_1.AuxInt 4260 sym2 := v_1.Aux 4261 x := v_1.Args[0] 4262 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4263 break 4264 } 4265 v.reset(OpAMD64LEAQ1) 4266 v.AuxInt = off1 + off2 4267 v.Aux = mergeSym(sym1, sym2) 4268 v.AddArg(x) 4269 v.AddArg(y) 4270 return true 4271 } 4272 return false 4273 } 4274 func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool { 4275 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 4276 // cond: is32Bit(c+d) && x.Op != OpSB 4277 // result: (LEAQ2 [c+d] {s} x y) 4278 for { 
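// NOTE (editorial, not generator output): LEAQ2 [c] {s} x y computes
// x + 2*y + c, so folding an ADDQconst differs by operand: into the base x
// (this rule) it adds d once, while into the scaled index y (next rule) it
// must add 2*d. The x.Op != OpSB conditions exist because SB, the static-base
// pseudo-register, is presumably only representable as the base of an
// address, never as a scaled index.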
4279 c := v.AuxInt 4280 s := v.Aux 4281 _ = v.Args[1] 4282 v_0 := v.Args[0] 4283 if v_0.Op != OpAMD64ADDQconst { 4284 break 4285 } 4286 d := v_0.AuxInt 4287 x := v_0.Args[0] 4288 y := v.Args[1] 4289 if !(is32Bit(c+d) && x.Op != OpSB) { 4290 break 4291 } 4292 v.reset(OpAMD64LEAQ2) 4293 v.AuxInt = c + d 4294 v.Aux = s 4295 v.AddArg(x) 4296 v.AddArg(y) 4297 return true 4298 } 4299 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 4300 // cond: is32Bit(c+2*d) && y.Op != OpSB 4301 // result: (LEAQ2 [c+2*d] {s} x y) 4302 for { 4303 c := v.AuxInt 4304 s := v.Aux 4305 _ = v.Args[1] 4306 x := v.Args[0] 4307 v_1 := v.Args[1] 4308 if v_1.Op != OpAMD64ADDQconst { 4309 break 4310 } 4311 d := v_1.AuxInt 4312 y := v_1.Args[0] 4313 if !(is32Bit(c+2*d) && y.Op != OpSB) { 4314 break 4315 } 4316 v.reset(OpAMD64LEAQ2) 4317 v.AuxInt = c + 2*d 4318 v.Aux = s 4319 v.AddArg(x) 4320 v.AddArg(y) 4321 return true 4322 } 4323 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 4324 // cond: 4325 // result: (LEAQ4 [c] {s} x y) 4326 for { 4327 c := v.AuxInt 4328 s := v.Aux 4329 _ = v.Args[1] 4330 x := v.Args[0] 4331 v_1 := v.Args[1] 4332 if v_1.Op != OpAMD64SHLQconst { 4333 break 4334 } 4335 if v_1.AuxInt != 1 { 4336 break 4337 } 4338 y := v_1.Args[0] 4339 v.reset(OpAMD64LEAQ4) 4340 v.AuxInt = c 4341 v.Aux = s 4342 v.AddArg(x) 4343 v.AddArg(y) 4344 return true 4345 } 4346 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 4347 // cond: 4348 // result: (LEAQ8 [c] {s} x y) 4349 for { 4350 c := v.AuxInt 4351 s := v.Aux 4352 _ = v.Args[1] 4353 x := v.Args[0] 4354 v_1 := v.Args[1] 4355 if v_1.Op != OpAMD64SHLQconst { 4356 break 4357 } 4358 if v_1.AuxInt != 2 { 4359 break 4360 } 4361 y := v_1.Args[0] 4362 v.reset(OpAMD64LEAQ8) 4363 v.AuxInt = c 4364 v.Aux = s 4365 v.AddArg(x) 4366 v.AddArg(y) 4367 return true 4368 } 4369 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4370 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4371 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 4372 for { 4373 off1 := v.AuxInt 4374 sym1 := v.Aux 4375 _ = v.Args[1] 4376 v_0 := v.Args[0] 4377 if v_0.Op != OpAMD64LEAQ { 4378 break 4379 } 4380 off2 := v_0.AuxInt 4381 sym2 := v_0.Aux 4382 x := v_0.Args[0] 4383 y := v.Args[1] 4384 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4385 break 4386 } 4387 v.reset(OpAMD64LEAQ2) 4388 v.AuxInt = off1 + off2 4389 v.Aux = mergeSym(sym1, sym2) 4390 v.AddArg(x) 4391 v.AddArg(y) 4392 return true 4393 } 4394 return false 4395 } 4396 func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool { 4397 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 4398 // cond: is32Bit(c+d) && x.Op != OpSB 4399 // result: (LEAQ4 [c+d] {s} x y) 4400 for { 4401 c := v.AuxInt 4402 s := v.Aux 4403 _ = v.Args[1] 4404 v_0 := v.Args[0] 4405 if v_0.Op != OpAMD64ADDQconst { 4406 break 4407 } 4408 d := v_0.AuxInt 4409 x := v_0.Args[0] 4410 y := v.Args[1] 4411 if !(is32Bit(c+d) && x.Op != OpSB) { 4412 break 4413 } 4414 v.reset(OpAMD64LEAQ4) 4415 v.AuxInt = c + d 4416 v.Aux = s 4417 v.AddArg(x) 4418 v.AddArg(y) 4419 return true 4420 } 4421 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 4422 // cond: is32Bit(c+4*d) && y.Op != OpSB 4423 // result: (LEAQ4 [c+4*d] {s} x y) 4424 for { 4425 c := v.AuxInt 4426 s := v.Aux 4427 _ = v.Args[1] 4428 x := v.Args[0] 4429 v_1 := v.Args[1] 4430 if v_1.Op != OpAMD64ADDQconst { 4431 break 4432 } 4433 d := v_1.AuxInt 4434 y := v_1.Args[0] 4435 if !(is32Bit(c+4*d) && y.Op != OpSB) { 4436 break 4437 } 4438 v.reset(OpAMD64LEAQ4) 4439 v.AuxInt = c + 4*d 4440 v.Aux = s 4441 
v.AddArg(x) 4442 v.AddArg(y) 4443 return true 4444 } 4445 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 4446 // cond: 4447 // result: (LEAQ8 [c] {s} x y) 4448 for { 4449 c := v.AuxInt 4450 s := v.Aux 4451 _ = v.Args[1] 4452 x := v.Args[0] 4453 v_1 := v.Args[1] 4454 if v_1.Op != OpAMD64SHLQconst { 4455 break 4456 } 4457 if v_1.AuxInt != 1 { 4458 break 4459 } 4460 y := v_1.Args[0] 4461 v.reset(OpAMD64LEAQ8) 4462 v.AuxInt = c 4463 v.Aux = s 4464 v.AddArg(x) 4465 v.AddArg(y) 4466 return true 4467 } 4468 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4469 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4470 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 4471 for { 4472 off1 := v.AuxInt 4473 sym1 := v.Aux 4474 _ = v.Args[1] 4475 v_0 := v.Args[0] 4476 if v_0.Op != OpAMD64LEAQ { 4477 break 4478 } 4479 off2 := v_0.AuxInt 4480 sym2 := v_0.Aux 4481 x := v_0.Args[0] 4482 y := v.Args[1] 4483 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4484 break 4485 } 4486 v.reset(OpAMD64LEAQ4) 4487 v.AuxInt = off1 + off2 4488 v.Aux = mergeSym(sym1, sym2) 4489 v.AddArg(x) 4490 v.AddArg(y) 4491 return true 4492 } 4493 return false 4494 } 4495 func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool { 4496 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 4497 // cond: is32Bit(c+d) && x.Op != OpSB 4498 // result: (LEAQ8 [c+d] {s} x y) 4499 for { 4500 c := v.AuxInt 4501 s := v.Aux 4502 _ = v.Args[1] 4503 v_0 := v.Args[0] 4504 if v_0.Op != OpAMD64ADDQconst { 4505 break 4506 } 4507 d := v_0.AuxInt 4508 x := v_0.Args[0] 4509 y := v.Args[1] 4510 if !(is32Bit(c+d) && x.Op != OpSB) { 4511 break 4512 } 4513 v.reset(OpAMD64LEAQ8) 4514 v.AuxInt = c + d 4515 v.Aux = s 4516 v.AddArg(x) 4517 v.AddArg(y) 4518 return true 4519 } 4520 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 4521 // cond: is32Bit(c+8*d) && y.Op != OpSB 4522 // result: (LEAQ8 [c+8*d] {s} x y) 4523 for { 4524 c := v.AuxInt 4525 s := v.Aux 4526 _ = v.Args[1] 4527 x := v.Args[0] 4528 v_1 := v.Args[1] 4529 if v_1.Op != OpAMD64ADDQconst { 4530 break 4531 } 4532 d := v_1.AuxInt 4533 y := v_1.Args[0] 4534 if !(is32Bit(c+8*d) && y.Op != OpSB) { 4535 break 4536 } 4537 v.reset(OpAMD64LEAQ8) 4538 v.AuxInt = c + 8*d 4539 v.Aux = s 4540 v.AddArg(x) 4541 v.AddArg(y) 4542 return true 4543 } 4544 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4545 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4546 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 4547 for { 4548 off1 := v.AuxInt 4549 sym1 := v.Aux 4550 _ = v.Args[1] 4551 v_0 := v.Args[0] 4552 if v_0.Op != OpAMD64LEAQ { 4553 break 4554 } 4555 off2 := v_0.AuxInt 4556 sym2 := v_0.Aux 4557 x := v_0.Args[0] 4558 y := v.Args[1] 4559 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4560 break 4561 } 4562 v.reset(OpAMD64LEAQ8) 4563 v.AuxInt = off1 + off2 4564 v.Aux = mergeSym(sym1, sym2) 4565 v.AddArg(x) 4566 v.AddArg(y) 4567 return true 4568 } 4569 return false 4570 } 4571 func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { 4572 b := v.Block 4573 _ = b 4574 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 4575 // cond: x.Uses == 1 && clobber(x) 4576 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4577 for { 4578 x := v.Args[0] 4579 if x.Op != OpAMD64MOVBload { 4580 break 4581 } 4582 off := x.AuxInt 4583 sym := x.Aux 4584 _ = x.Args[1] 4585 ptr := x.Args[0] 4586 mem := x.Args[1] 4587 if !(x.Uses == 1 && clobber(x)) { 4588 break 4589 } 4590 b = x.Block 4591 v0 := b.NewValue0(v.Pos, 
OpAMD64MOVBQSXload, v.Type) 4592 v.reset(OpCopy) 4593 v.AddArg(v0) 4594 v0.AuxInt = off 4595 v0.Aux = sym 4596 v0.AddArg(ptr) 4597 v0.AddArg(mem) 4598 return true 4599 } 4600 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) 4601 // cond: x.Uses == 1 && clobber(x) 4602 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4603 for { 4604 x := v.Args[0] 4605 if x.Op != OpAMD64MOVWload { 4606 break 4607 } 4608 off := x.AuxInt 4609 sym := x.Aux 4610 _ = x.Args[1] 4611 ptr := x.Args[0] 4612 mem := x.Args[1] 4613 if !(x.Uses == 1 && clobber(x)) { 4614 break 4615 } 4616 b = x.Block 4617 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4618 v.reset(OpCopy) 4619 v.AddArg(v0) 4620 v0.AuxInt = off 4621 v0.Aux = sym 4622 v0.AddArg(ptr) 4623 v0.AddArg(mem) 4624 return true 4625 } 4626 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) 4627 // cond: x.Uses == 1 && clobber(x) 4628 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4629 for { 4630 x := v.Args[0] 4631 if x.Op != OpAMD64MOVLload { 4632 break 4633 } 4634 off := x.AuxInt 4635 sym := x.Aux 4636 _ = x.Args[1] 4637 ptr := x.Args[0] 4638 mem := x.Args[1] 4639 if !(x.Uses == 1 && clobber(x)) { 4640 break 4641 } 4642 b = x.Block 4643 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4644 v.reset(OpCopy) 4645 v.AddArg(v0) 4646 v0.AuxInt = off 4647 v0.Aux = sym 4648 v0.AddArg(ptr) 4649 v0.AddArg(mem) 4650 return true 4651 } 4652 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) 4653 // cond: x.Uses == 1 && clobber(x) 4654 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4655 for { 4656 x := v.Args[0] 4657 if x.Op != OpAMD64MOVQload { 4658 break 4659 } 4660 off := x.AuxInt 4661 sym := x.Aux 4662 _ = x.Args[1] 4663 ptr := x.Args[0] 4664 mem := x.Args[1] 4665 if !(x.Uses == 1 && clobber(x)) { 4666 break 4667 } 4668 b = x.Block 4669 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4670 v.reset(OpCopy) 4671 v.AddArg(v0) 4672 v0.AuxInt = off 4673 v0.Aux = sym 4674 v0.AddArg(ptr) 4675 v0.AddArg(mem) 4676 return true 4677 } 4678 // match: (MOVBQSX (ANDLconst [c] x)) 4679 // cond: c & 0x80 == 0 4680 // result: (ANDLconst [c & 0x7f] x) 4681 for { 4682 v_0 := v.Args[0] 4683 if v_0.Op != OpAMD64ANDLconst { 4684 break 4685 } 4686 c := v_0.AuxInt 4687 x := v_0.Args[0] 4688 if !(c&0x80 == 0) { 4689 break 4690 } 4691 v.reset(OpAMD64ANDLconst) 4692 v.AuxInt = c & 0x7f 4693 v.AddArg(x) 4694 return true 4695 } 4696 // match: (MOVBQSX (MOVBQSX x)) 4697 // cond: 4698 // result: (MOVBQSX x) 4699 for { 4700 v_0 := v.Args[0] 4701 if v_0.Op != OpAMD64MOVBQSX { 4702 break 4703 } 4704 x := v_0.Args[0] 4705 v.reset(OpAMD64MOVBQSX) 4706 v.AddArg(x) 4707 return true 4708 } 4709 return false 4710 } 4711 func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool { 4712 // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 4713 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4714 // result: (MOVBQSX x) 4715 for { 4716 off := v.AuxInt 4717 sym := v.Aux 4718 _ = v.Args[1] 4719 ptr := v.Args[0] 4720 v_1 := v.Args[1] 4721 if v_1.Op != OpAMD64MOVBstore { 4722 break 4723 } 4724 off2 := v_1.AuxInt 4725 sym2 := v_1.Aux 4726 _ = v_1.Args[2] 4727 ptr2 := v_1.Args[0] 4728 x := v_1.Args[1] 4729 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4730 break 4731 } 4732 v.reset(OpAMD64MOVBQSX) 4733 v.AddArg(x) 4734 return true 4735 } 4736 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4737 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4738 // result: (MOVBQSXload 
[off1+off2] {mergeSym(sym1,sym2)} base mem) 4739 for { 4740 off1 := v.AuxInt 4741 sym1 := v.Aux 4742 _ = v.Args[1] 4743 v_0 := v.Args[0] 4744 if v_0.Op != OpAMD64LEAQ { 4745 break 4746 } 4747 off2 := v_0.AuxInt 4748 sym2 := v_0.Aux 4749 base := v_0.Args[0] 4750 mem := v.Args[1] 4751 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4752 break 4753 } 4754 v.reset(OpAMD64MOVBQSXload) 4755 v.AuxInt = off1 + off2 4756 v.Aux = mergeSym(sym1, sym2) 4757 v.AddArg(base) 4758 v.AddArg(mem) 4759 return true 4760 } 4761 return false 4762 } 4763 func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { 4764 b := v.Block 4765 _ = b 4766 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 4767 // cond: x.Uses == 1 && clobber(x) 4768 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4769 for { 4770 x := v.Args[0] 4771 if x.Op != OpAMD64MOVBload { 4772 break 4773 } 4774 off := x.AuxInt 4775 sym := x.Aux 4776 _ = x.Args[1] 4777 ptr := x.Args[0] 4778 mem := x.Args[1] 4779 if !(x.Uses == 1 && clobber(x)) { 4780 break 4781 } 4782 b = x.Block 4783 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4784 v.reset(OpCopy) 4785 v.AddArg(v0) 4786 v0.AuxInt = off 4787 v0.Aux = sym 4788 v0.AddArg(ptr) 4789 v0.AddArg(mem) 4790 return true 4791 } 4792 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) 4793 // cond: x.Uses == 1 && clobber(x) 4794 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4795 for { 4796 x := v.Args[0] 4797 if x.Op != OpAMD64MOVWload { 4798 break 4799 } 4800 off := x.AuxInt 4801 sym := x.Aux 4802 _ = x.Args[1] 4803 ptr := x.Args[0] 4804 mem := x.Args[1] 4805 if !(x.Uses == 1 && clobber(x)) { 4806 break 4807 } 4808 b = x.Block 4809 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4810 v.reset(OpCopy) 4811 v.AddArg(v0) 4812 v0.AuxInt = off 4813 v0.Aux = sym 4814 v0.AddArg(ptr) 4815 v0.AddArg(mem) 4816 return true 4817 } 4818 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) 4819 // cond: x.Uses == 1 && clobber(x) 4820 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4821 for { 4822 x := v.Args[0] 4823 if x.Op != OpAMD64MOVLload { 4824 break 4825 } 4826 off := x.AuxInt 4827 sym := x.Aux 4828 _ = x.Args[1] 4829 ptr := x.Args[0] 4830 mem := x.Args[1] 4831 if !(x.Uses == 1 && clobber(x)) { 4832 break 4833 } 4834 b = x.Block 4835 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4836 v.reset(OpCopy) 4837 v.AddArg(v0) 4838 v0.AuxInt = off 4839 v0.Aux = sym 4840 v0.AddArg(ptr) 4841 v0.AddArg(mem) 4842 return true 4843 } 4844 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) 4845 // cond: x.Uses == 1 && clobber(x) 4846 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4847 for { 4848 x := v.Args[0] 4849 if x.Op != OpAMD64MOVQload { 4850 break 4851 } 4852 off := x.AuxInt 4853 sym := x.Aux 4854 _ = x.Args[1] 4855 ptr := x.Args[0] 4856 mem := x.Args[1] 4857 if !(x.Uses == 1 && clobber(x)) { 4858 break 4859 } 4860 b = x.Block 4861 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4862 v.reset(OpCopy) 4863 v.AddArg(v0) 4864 v0.AuxInt = off 4865 v0.Aux = sym 4866 v0.AddArg(ptr) 4867 v0.AddArg(mem) 4868 return true 4869 } 4870 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 4871 // cond: x.Uses == 1 && clobber(x) 4872 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 4873 for { 4874 x := v.Args[0] 4875 if x.Op != OpAMD64MOVBloadidx1 { 4876 break 4877 } 4878 off := x.AuxInt 4879 sym := x.Aux 4880 _ = x.Args[2] 4881 ptr := x.Args[0] 4882 idx := x.Args[1] 4883 mem := x.Args[2] 4884 if !(x.Uses == 1 && clobber(x)) { 4885 break 4886 } 
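// NOTE (editorial, not generator output): these @x.Block rewrites fire only
// for a single-use load (x.Uses == 1, with clobber(x) marking it dead). The
// replacement load is built in the old load's block (b = x.Block) so it is
// issued at the original program point, and the explicit zero-extension
// disappears because MOVBload/MOVBloadidx1 already zero-extend the loaded
// byte to 64 bits.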
func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVBloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c & 0xff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0xff
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
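// MOVBload: forward a same-address store to the load, fold constant and
// LEA address arithmetic into the offset, and switch to the indexed
// (MOVBloadidx1) form when the address is a two-register sum.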
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
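// MOVBloadidx1: an ADDQconst on either the pointer or the index operand,
// in either argument order, is folded into the displacement, subject to
// the is32Bit overflow check.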
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
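// MOVBstore, part 0: a single-use SETxx flags result that is immediately
// stored to memory is fused into the corresponding SETxxmem op, so the
// condition bit goes straight to memory instead of through a register.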
func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
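// MOVBstore, part 10: drop redundant extensions of the stored byte, fold
// address arithmetic and constants, and recognize descending chains of
// single-use byte stores of shifted copies of one value as a byte-swapped
// wider store (ROLWconst for 2 bytes, BSWAPL/BSWAPQ for 4 and 8).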
func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(int64(int8(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x0 := v.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst {
			break
		}
		if x0_1.AuxInt != 8 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x2 := v.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst {
			break
		}
		if x2_1.AuxInt != 8 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst {
			break
		}
		if x1_1.AuxInt != 16 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst {
			break
		}
		if x0_1.AuxInt != 24 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x6 := v.Args[2]
		if x6.Op != OpAMD64MOVBstore {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[2]
		if p != x6.Args[0] {
			break
		}
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst {
			break
		}
		if x6_1.AuxInt != 8 {
			break
		}
		if w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[2]
		if p != x5.Args[0] {
			break
		}
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst {
			break
		}
		if x5_1.AuxInt != 16 {
			break
		}
		if w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[2]
		if p != x4.Args[0] {
			break
		}
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst {
			break
		}
		if x4_1.AuxInt != 24 {
			break
		}
		if w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[2]
		if p != x3.Args[0] {
			break
		}
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst {
			break
		}
		if x3_1.AuxInt != 32 {
			break
		}
		if w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst {
			break
		}
		if x2_1.AuxInt != 40 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst {
			break
		}
		if x1_1.AuxInt != 48 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst {
			break
		}
		if x0_1.AuxInt != 56 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
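// MOVBstore, part 20: merge a byte store of bits 8..15 adjacent to a byte
// store of the low byte into one MOVWstore, and merge two adjacent
// byte-sized load/store copies into a single 16-bit copy; the remaining
// rules fold 32-bit address arithmetic.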
func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 8 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVBstore {
			break
		}
		if mem2.AuxInt != i-1 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVBload {
			break
		}
		if x2.AuxInt != j-1 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = j - 1
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
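// MOVBstoreconst: address offsets are absorbed into the ValAndOff-encoded
// AuxInt when canAdd reports no overflow, two adjacent constant byte
// stores combine into one MOVWstoreconst, and two-register addresses
// switch to the indexed form.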
func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
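// MOVBstoreconstidx1: the indexed constant byte store gets the same
// offset-folding and adjacent-store merging as MOVBstoreconst above.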
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
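// MOVBstoreidx1: indexed byte stores fold ADDQconst displacements and
// recognize the same byte-swapped and adjacent store-merging patterns as
// the non-indexed MOVBstore rules above.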
func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x0 := v.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRWconst {
			break
		}
		if x0_2.AuxInt != 8 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x2 := v.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRLconst {
			break
		}
		if x2_2.AuxInt != 8 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRLconst {
			break
		}
		if x1_2.AuxInt != 16 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRLconst {
			break
		}
		if x0_2.AuxInt != 24 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x6 := v.Args[3]
		if x6.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[3]
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		x6_2 := x6.Args[2]
		if x6_2.Op != OpAMD64SHRQconst {
			break
		}
		if x6_2.AuxInt != 8 {
			break
		}
		if w != x6_2.Args[0] {
			break
		}
		x5 := x6.Args[3]
		if x5.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[3]
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		x5_2 := x5.Args[2]
		if x5_2.Op != OpAMD64SHRQconst {
			break
		}
		if x5_2.AuxInt != 16 {
			break
		}
		if w != x5_2.Args[0] {
			break
		}
		x4 := x5.Args[3]
		if x4.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[3]
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		x4_2 := x4.Args[2]
		if x4_2.Op != OpAMD64SHRQconst {
			break
		}
		if x4_2.AuxInt != 24 {
			break
		}
		if w != x4_2.Args[0] {
			break
		}
		x3 := x4.Args[3]
		if x3.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[3]
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		x3_2 := x3.Args[2]
		if x3_2.Op != OpAMD64SHRQconst {
			break
		}
		if x3_2.AuxInt != 32 {
			break
		}
		if w != x3_2.Args[0] {
			break
		}
		x2 := x3.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRQconst {
			break
		}
		if x2_2.AuxInt != 40 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRQconst {
			break
		}
		if x1_2.AuxInt != 48 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRQconst {
			break
		}
		if x0_2.AuxInt != 56 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 8 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
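// MOVLQSX/MOVLQSXload: sign extension of a single-use 32-bit load becomes
// a sign-extending load; masks that leave the sign bit clear, nested sign
// extensions, store-to-load forwarding, and LEAQ offsets are simplified
// as in the byte-sized rules above.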
func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: c & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fffffff
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// cond:
	// result: (MOVLQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool {
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
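// MOVLQZX: as with MOVBQZX, 32-bit loads zero the upper 32 bits of the
// destination, so the explicit zero-extension is dropped for single-use
// loads; the zeroUpper32Bits rule erases the extension whenever the
// operand is already known to produce zero upper bits.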
func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x
	for {
		x := v.Args[0]
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx4 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// cond:
	// result: (MOVLQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// cond:
	// result: (MOVWQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
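// MOVLatomicload: only address folding applies here; ADDQconst and LEAQ
// offsets are merged into the atomic load without otherwise changing its
// ordering relative to other memory operations.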
func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLf2i <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLi2f <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
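// MOVLload: store-to-load forwarding, offset folding, and selection of
// the indexed forms (idx1/idx4/idx8) from LEAQ shapes; the final rule
// reads a value just stored by MOVSSstore back as an integer via MOVLf2i
// instead of a round trip through memory.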
func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
return true 7539 } 7540 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7541 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7542 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7543 for { 7544 off1 := v.AuxInt 7545 sym1 := v.Aux 7546 _ = v.Args[1] 7547 v_0 := v.Args[0] 7548 if v_0.Op != OpAMD64LEAQ1 { 7549 break 7550 } 7551 off2 := v_0.AuxInt 7552 sym2 := v_0.Aux 7553 _ = v_0.Args[1] 7554 ptr := v_0.Args[0] 7555 idx := v_0.Args[1] 7556 mem := v.Args[1] 7557 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7558 break 7559 } 7560 v.reset(OpAMD64MOVLloadidx1) 7561 v.AuxInt = off1 + off2 7562 v.Aux = mergeSym(sym1, sym2) 7563 v.AddArg(ptr) 7564 v.AddArg(idx) 7565 v.AddArg(mem) 7566 return true 7567 } 7568 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 7569 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7570 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7571 for { 7572 off1 := v.AuxInt 7573 sym1 := v.Aux 7574 _ = v.Args[1] 7575 v_0 := v.Args[0] 7576 if v_0.Op != OpAMD64LEAQ4 { 7577 break 7578 } 7579 off2 := v_0.AuxInt 7580 sym2 := v_0.Aux 7581 _ = v_0.Args[1] 7582 ptr := v_0.Args[0] 7583 idx := v_0.Args[1] 7584 mem := v.Args[1] 7585 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7586 break 7587 } 7588 v.reset(OpAMD64MOVLloadidx4) 7589 v.AuxInt = off1 + off2 7590 v.Aux = mergeSym(sym1, sym2) 7591 v.AddArg(ptr) 7592 v.AddArg(idx) 7593 v.AddArg(mem) 7594 return true 7595 } 7596 // match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 7597 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7598 // result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7599 for { 7600 off1 := v.AuxInt 7601 sym1 := v.Aux 7602 _ = v.Args[1] 7603 v_0 := v.Args[0] 7604 if v_0.Op != OpAMD64LEAQ8 { 7605 break 7606 } 7607 off2 := v_0.AuxInt 7608 sym2 := v_0.Aux 7609 _ = v_0.Args[1] 7610 ptr := v_0.Args[0] 7611 idx := v_0.Args[1] 7612 mem := v.Args[1] 7613 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7614 break 7615 } 7616 v.reset(OpAMD64MOVLloadidx8) 7617 v.AuxInt = off1 + off2 7618 v.Aux = mergeSym(sym1, sym2) 7619 v.AddArg(ptr) 7620 v.AddArg(idx) 7621 v.AddArg(mem) 7622 return true 7623 } 7624 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 7625 // cond: ptr.Op != OpSB 7626 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 7627 for { 7628 off := v.AuxInt 7629 sym := v.Aux 7630 _ = v.Args[1] 7631 v_0 := v.Args[0] 7632 if v_0.Op != OpAMD64ADDQ { 7633 break 7634 } 7635 _ = v_0.Args[1] 7636 ptr := v_0.Args[0] 7637 idx := v_0.Args[1] 7638 mem := v.Args[1] 7639 if !(ptr.Op != OpSB) { 7640 break 7641 } 7642 v.reset(OpAMD64MOVLloadidx1) 7643 v.AuxInt = off 7644 v.Aux = sym 7645 v.AddArg(ptr) 7646 v.AddArg(idx) 7647 v.AddArg(mem) 7648 return true 7649 } 7650 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 7651 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 7652 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7653 for { 7654 off1 := v.AuxInt 7655 sym1 := v.Aux 7656 _ = v.Args[1] 7657 v_0 := v.Args[0] 7658 if v_0.Op != OpAMD64LEAL { 7659 break 7660 } 7661 off2 := v_0.AuxInt 7662 sym2 := v_0.Aux 7663 base := v_0.Args[0] 7664 mem := v.Args[1] 7665 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 7666 break 7667 } 7668 v.reset(OpAMD64MOVLload) 7669 v.AuxInt = off1 + off2 7670 v.Aux = mergeSym(sym1, sym2) 7671 v.AddArg(base) 7672 v.AddArg(mem) 7673 return true 7674 } 7675 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) 
mem) 7676 // cond: is32Bit(off1+off2) 7677 // result: (MOVLload [off1+off2] {sym} ptr mem) 7678 for { 7679 off1 := v.AuxInt 7680 sym := v.Aux 7681 _ = v.Args[1] 7682 v_0 := v.Args[0] 7683 if v_0.Op != OpAMD64ADDLconst { 7684 break 7685 } 7686 off2 := v_0.AuxInt 7687 ptr := v_0.Args[0] 7688 mem := v.Args[1] 7689 if !(is32Bit(off1 + off2)) { 7690 break 7691 } 7692 v.reset(OpAMD64MOVLload) 7693 v.AuxInt = off1 + off2 7694 v.Aux = sym 7695 v.AddArg(ptr) 7696 v.AddArg(mem) 7697 return true 7698 } 7699 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) 7700 // cond: 7701 // result: (MOVLf2i val) 7702 for { 7703 off := v.AuxInt 7704 sym := v.Aux 7705 _ = v.Args[1] 7706 ptr := v.Args[0] 7707 v_1 := v.Args[1] 7708 if v_1.Op != OpAMD64MOVSSstore { 7709 break 7710 } 7711 if v_1.AuxInt != off { 7712 break 7713 } 7714 if v_1.Aux != sym { 7715 break 7716 } 7717 _ = v_1.Args[2] 7718 if ptr != v_1.Args[0] { 7719 break 7720 } 7721 val := v_1.Args[1] 7722 v.reset(OpAMD64MOVLf2i) 7723 v.AddArg(val) 7724 return true 7725 } 7726 return false 7727 } 7728 func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { 7729 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 7730 // cond: 7731 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 7732 for { 7733 c := v.AuxInt 7734 sym := v.Aux 7735 _ = v.Args[2] 7736 ptr := v.Args[0] 7737 v_1 := v.Args[1] 7738 if v_1.Op != OpAMD64SHLQconst { 7739 break 7740 } 7741 if v_1.AuxInt != 2 { 7742 break 7743 } 7744 idx := v_1.Args[0] 7745 mem := v.Args[2] 7746 v.reset(OpAMD64MOVLloadidx4) 7747 v.AuxInt = c 7748 v.Aux = sym 7749 v.AddArg(ptr) 7750 v.AddArg(idx) 7751 v.AddArg(mem) 7752 return true 7753 } 7754 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem) 7755 // cond: 7756 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 7757 for { 7758 c := v.AuxInt 7759 sym := v.Aux 7760 _ = v.Args[2] 7761 v_0 := v.Args[0] 7762 if v_0.Op != OpAMD64SHLQconst { 7763 break 7764 } 7765 if v_0.AuxInt != 2 { 7766 break 7767 } 7768 idx := v_0.Args[0] 7769 ptr := v.Args[1] 7770 mem := v.Args[2] 7771 v.reset(OpAMD64MOVLloadidx4) 7772 v.AuxInt = c 7773 v.Aux = sym 7774 v.AddArg(ptr) 7775 v.AddArg(idx) 7776 v.AddArg(mem) 7777 return true 7778 } 7779 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7780 // cond: 7781 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 7782 for { 7783 c := v.AuxInt 7784 sym := v.Aux 7785 _ = v.Args[2] 7786 ptr := v.Args[0] 7787 v_1 := v.Args[1] 7788 if v_1.Op != OpAMD64SHLQconst { 7789 break 7790 } 7791 if v_1.AuxInt != 3 { 7792 break 7793 } 7794 idx := v_1.Args[0] 7795 mem := v.Args[2] 7796 v.reset(OpAMD64MOVLloadidx8) 7797 v.AuxInt = c 7798 v.Aux = sym 7799 v.AddArg(ptr) 7800 v.AddArg(idx) 7801 v.AddArg(mem) 7802 return true 7803 } 7804 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 7805 // cond: 7806 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 7807 for { 7808 c := v.AuxInt 7809 sym := v.Aux 7810 _ = v.Args[2] 7811 v_0 := v.Args[0] 7812 if v_0.Op != OpAMD64SHLQconst { 7813 break 7814 } 7815 if v_0.AuxInt != 3 { 7816 break 7817 } 7818 idx := v_0.Args[0] 7819 ptr := v.Args[1] 7820 mem := v.Args[2] 7821 v.reset(OpAMD64MOVLloadidx8) 7822 v.AuxInt = c 7823 v.Aux = sym 7824 v.AddArg(ptr) 7825 v.AddArg(idx) 7826 v.AddArg(mem) 7827 return true 7828 } 7829 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 7830 // cond: is32Bit(c+d) 7831 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7832 for { 7833 c := v.AuxInt 7834 sym := v.Aux 7835 _ = v.Args[2] 7836 v_0 := v.Args[0] 7837 if v_0.Op != 
OpAMD64ADDQconst { 7838 break 7839 } 7840 d := v_0.AuxInt 7841 ptr := v_0.Args[0] 7842 idx := v.Args[1] 7843 mem := v.Args[2] 7844 if !(is32Bit(c + d)) { 7845 break 7846 } 7847 v.reset(OpAMD64MOVLloadidx1) 7848 v.AuxInt = c + d 7849 v.Aux = sym 7850 v.AddArg(ptr) 7851 v.AddArg(idx) 7852 v.AddArg(mem) 7853 return true 7854 } 7855 // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 7856 // cond: is32Bit(c+d) 7857 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7858 for { 7859 c := v.AuxInt 7860 sym := v.Aux 7861 _ = v.Args[2] 7862 idx := v.Args[0] 7863 v_1 := v.Args[1] 7864 if v_1.Op != OpAMD64ADDQconst { 7865 break 7866 } 7867 d := v_1.AuxInt 7868 ptr := v_1.Args[0] 7869 mem := v.Args[2] 7870 if !(is32Bit(c + d)) { 7871 break 7872 } 7873 v.reset(OpAMD64MOVLloadidx1) 7874 v.AuxInt = c + d 7875 v.Aux = sym 7876 v.AddArg(ptr) 7877 v.AddArg(idx) 7878 v.AddArg(mem) 7879 return true 7880 } 7881 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 7882 // cond: is32Bit(c+d) 7883 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7884 for { 7885 c := v.AuxInt 7886 sym := v.Aux 7887 _ = v.Args[2] 7888 ptr := v.Args[0] 7889 v_1 := v.Args[1] 7890 if v_1.Op != OpAMD64ADDQconst { 7891 break 7892 } 7893 d := v_1.AuxInt 7894 idx := v_1.Args[0] 7895 mem := v.Args[2] 7896 if !(is32Bit(c + d)) { 7897 break 7898 } 7899 v.reset(OpAMD64MOVLloadidx1) 7900 v.AuxInt = c + d 7901 v.Aux = sym 7902 v.AddArg(ptr) 7903 v.AddArg(idx) 7904 v.AddArg(mem) 7905 return true 7906 } 7907 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 7908 // cond: is32Bit(c+d) 7909 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) 7910 for { 7911 c := v.AuxInt 7912 sym := v.Aux 7913 _ = v.Args[2] 7914 v_0 := v.Args[0] 7915 if v_0.Op != OpAMD64ADDQconst { 7916 break 7917 } 7918 d := v_0.AuxInt 7919 idx := v_0.Args[0] 7920 ptr := v.Args[1] 7921 mem := v.Args[2] 7922 if !(is32Bit(c + d)) { 7923 break 7924 } 7925 v.reset(OpAMD64MOVLloadidx1) 7926 v.AuxInt = c + d 7927 v.Aux = sym 7928 v.AddArg(ptr) 7929 v.AddArg(idx) 7930 v.AddArg(mem) 7931 return true 7932 } 7933 return false 7934 } 7935 func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { 7936 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) 7937 // cond: is32Bit(c+d) 7938 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) 7939 for { 7940 c := v.AuxInt 7941 sym := v.Aux 7942 _ = v.Args[2] 7943 v_0 := v.Args[0] 7944 if v_0.Op != OpAMD64ADDQconst { 7945 break 7946 } 7947 d := v_0.AuxInt 7948 ptr := v_0.Args[0] 7949 idx := v.Args[1] 7950 mem := v.Args[2] 7951 if !(is32Bit(c + d)) { 7952 break 7953 } 7954 v.reset(OpAMD64MOVLloadidx4) 7955 v.AuxInt = c + d 7956 v.Aux = sym 7957 v.AddArg(ptr) 7958 v.AddArg(idx) 7959 v.AddArg(mem) 7960 return true 7961 } 7962 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) 7963 // cond: is32Bit(c+4*d) 7964 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) 7965 for { 7966 c := v.AuxInt 7967 sym := v.Aux 7968 _ = v.Args[2] 7969 ptr := v.Args[0] 7970 v_1 := v.Args[1] 7971 if v_1.Op != OpAMD64ADDQconst { 7972 break 7973 } 7974 d := v_1.AuxInt 7975 idx := v_1.Args[0] 7976 mem := v.Args[2] 7977 if !(is32Bit(c + 4*d)) { 7978 break 7979 } 7980 v.reset(OpAMD64MOVLloadidx4) 7981 v.AuxInt = c + 4*d 7982 v.Aux = sym 7983 v.AddArg(ptr) 7984 v.AddArg(idx) 7985 v.AddArg(mem) 7986 return true 7987 } 7988 return false 7989 } 7990 func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool { 7991 // match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 7992 // cond: is32Bit(c+d) 7993 // result: 
(MOVLloadidx8 [c+d] {sym} ptr idx mem) 7994 for { 7995 c := v.AuxInt 7996 sym := v.Aux 7997 _ = v.Args[2] 7998 v_0 := v.Args[0] 7999 if v_0.Op != OpAMD64ADDQconst { 8000 break 8001 } 8002 d := v_0.AuxInt 8003 ptr := v_0.Args[0] 8004 idx := v.Args[1] 8005 mem := v.Args[2] 8006 if !(is32Bit(c + d)) { 8007 break 8008 } 8009 v.reset(OpAMD64MOVLloadidx8) 8010 v.AuxInt = c + d 8011 v.Aux = sym 8012 v.AddArg(ptr) 8013 v.AddArg(idx) 8014 v.AddArg(mem) 8015 return true 8016 } 8017 // match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) 8018 // cond: is32Bit(c+8*d) 8019 // result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem) 8020 for { 8021 c := v.AuxInt 8022 sym := v.Aux 8023 _ = v.Args[2] 8024 ptr := v.Args[0] 8025 v_1 := v.Args[1] 8026 if v_1.Op != OpAMD64ADDQconst { 8027 break 8028 } 8029 d := v_1.AuxInt 8030 idx := v_1.Args[0] 8031 mem := v.Args[2] 8032 if !(is32Bit(c + 8*d)) { 8033 break 8034 } 8035 v.reset(OpAMD64MOVLloadidx8) 8036 v.AuxInt = c + 8*d 8037 v.Aux = sym 8038 v.AddArg(ptr) 8039 v.AddArg(idx) 8040 v.AddArg(mem) 8041 return true 8042 } 8043 return false 8044 } 8045 func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { 8046 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) 8047 // cond: 8048 // result: (MOVLstore [off] {sym} ptr x mem) 8049 for { 8050 off := v.AuxInt 8051 sym := v.Aux 8052 _ = v.Args[2] 8053 ptr := v.Args[0] 8054 v_1 := v.Args[1] 8055 if v_1.Op != OpAMD64MOVLQSX { 8056 break 8057 } 8058 x := v_1.Args[0] 8059 mem := v.Args[2] 8060 v.reset(OpAMD64MOVLstore) 8061 v.AuxInt = off 8062 v.Aux = sym 8063 v.AddArg(ptr) 8064 v.AddArg(x) 8065 v.AddArg(mem) 8066 return true 8067 } 8068 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) 8069 // cond: 8070 // result: (MOVLstore [off] {sym} ptr x mem) 8071 for { 8072 off := v.AuxInt 8073 sym := v.Aux 8074 _ = v.Args[2] 8075 ptr := v.Args[0] 8076 v_1 := v.Args[1] 8077 if v_1.Op != OpAMD64MOVLQZX { 8078 break 8079 } 8080 x := v_1.Args[0] 8081 mem := v.Args[2] 8082 v.reset(OpAMD64MOVLstore) 8083 v.AuxInt = off 8084 v.Aux = sym 8085 v.AddArg(ptr) 8086 v.AddArg(x) 8087 v.AddArg(mem) 8088 return true 8089 } 8090 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 8091 // cond: is32Bit(off1+off2) 8092 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 8093 for { 8094 off1 := v.AuxInt 8095 sym := v.Aux 8096 _ = v.Args[2] 8097 v_0 := v.Args[0] 8098 if v_0.Op != OpAMD64ADDQconst { 8099 break 8100 } 8101 off2 := v_0.AuxInt 8102 ptr := v_0.Args[0] 8103 val := v.Args[1] 8104 mem := v.Args[2] 8105 if !(is32Bit(off1 + off2)) { 8106 break 8107 } 8108 v.reset(OpAMD64MOVLstore) 8109 v.AuxInt = off1 + off2 8110 v.Aux = sym 8111 v.AddArg(ptr) 8112 v.AddArg(val) 8113 v.AddArg(mem) 8114 return true 8115 } 8116 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) 8117 // cond: validOff(off) 8118 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) 8119 for { 8120 off := v.AuxInt 8121 sym := v.Aux 8122 _ = v.Args[2] 8123 ptr := v.Args[0] 8124 v_1 := v.Args[1] 8125 if v_1.Op != OpAMD64MOVLconst { 8126 break 8127 } 8128 c := v_1.AuxInt 8129 mem := v.Args[2] 8130 if !(validOff(off)) { 8131 break 8132 } 8133 v.reset(OpAMD64MOVLstoreconst) 8134 v.AuxInt = makeValAndOff(int64(int32(c)), off) 8135 v.Aux = sym 8136 v.AddArg(ptr) 8137 v.AddArg(mem) 8138 return true 8139 } 8140 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 8141 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8142 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8143 for { 8144 off1 := 
v.AuxInt 8145 sym1 := v.Aux 8146 _ = v.Args[2] 8147 v_0 := v.Args[0] 8148 if v_0.Op != OpAMD64LEAQ { 8149 break 8150 } 8151 off2 := v_0.AuxInt 8152 sym2 := v_0.Aux 8153 base := v_0.Args[0] 8154 val := v.Args[1] 8155 mem := v.Args[2] 8156 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8157 break 8158 } 8159 v.reset(OpAMD64MOVLstore) 8160 v.AuxInt = off1 + off2 8161 v.Aux = mergeSym(sym1, sym2) 8162 v.AddArg(base) 8163 v.AddArg(val) 8164 v.AddArg(mem) 8165 return true 8166 } 8167 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 8168 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8169 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8170 for { 8171 off1 := v.AuxInt 8172 sym1 := v.Aux 8173 _ = v.Args[2] 8174 v_0 := v.Args[0] 8175 if v_0.Op != OpAMD64LEAQ1 { 8176 break 8177 } 8178 off2 := v_0.AuxInt 8179 sym2 := v_0.Aux 8180 _ = v_0.Args[1] 8181 ptr := v_0.Args[0] 8182 idx := v_0.Args[1] 8183 val := v.Args[1] 8184 mem := v.Args[2] 8185 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8186 break 8187 } 8188 v.reset(OpAMD64MOVLstoreidx1) 8189 v.AuxInt = off1 + off2 8190 v.Aux = mergeSym(sym1, sym2) 8191 v.AddArg(ptr) 8192 v.AddArg(idx) 8193 v.AddArg(val) 8194 v.AddArg(mem) 8195 return true 8196 } 8197 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) 8198 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8199 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8200 for { 8201 off1 := v.AuxInt 8202 sym1 := v.Aux 8203 _ = v.Args[2] 8204 v_0 := v.Args[0] 8205 if v_0.Op != OpAMD64LEAQ4 { 8206 break 8207 } 8208 off2 := v_0.AuxInt 8209 sym2 := v_0.Aux 8210 _ = v_0.Args[1] 8211 ptr := v_0.Args[0] 8212 idx := v_0.Args[1] 8213 val := v.Args[1] 8214 mem := v.Args[2] 8215 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8216 break 8217 } 8218 v.reset(OpAMD64MOVLstoreidx4) 8219 v.AuxInt = off1 + off2 8220 v.Aux = mergeSym(sym1, sym2) 8221 v.AddArg(ptr) 8222 v.AddArg(idx) 8223 v.AddArg(val) 8224 v.AddArg(mem) 8225 return true 8226 } 8227 // match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) 8228 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 8229 // result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 8230 for { 8231 off1 := v.AuxInt 8232 sym1 := v.Aux 8233 _ = v.Args[2] 8234 v_0 := v.Args[0] 8235 if v_0.Op != OpAMD64LEAQ8 { 8236 break 8237 } 8238 off2 := v_0.AuxInt 8239 sym2 := v_0.Aux 8240 _ = v_0.Args[1] 8241 ptr := v_0.Args[0] 8242 idx := v_0.Args[1] 8243 val := v.Args[1] 8244 mem := v.Args[2] 8245 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 8246 break 8247 } 8248 v.reset(OpAMD64MOVLstoreidx8) 8249 v.AuxInt = off1 + off2 8250 v.Aux = mergeSym(sym1, sym2) 8251 v.AddArg(ptr) 8252 v.AddArg(idx) 8253 v.AddArg(val) 8254 v.AddArg(mem) 8255 return true 8256 } 8257 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) 8258 // cond: ptr.Op != OpSB 8259 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) 8260 for { 8261 off := v.AuxInt 8262 sym := v.Aux 8263 _ = v.Args[2] 8264 v_0 := v.Args[0] 8265 if v_0.Op != OpAMD64ADDQ { 8266 break 8267 } 8268 _ = v_0.Args[1] 8269 ptr := v_0.Args[0] 8270 idx := v_0.Args[1] 8271 val := v.Args[1] 8272 mem := v.Args[2] 8273 if !(ptr.Op != OpSB) { 8274 break 8275 } 8276 v.reset(OpAMD64MOVLstoreidx1) 8277 v.AuxInt = off 8278 v.Aux = sym 8279 v.AddArg(ptr) 8280 v.AddArg(idx) 8281 v.AddArg(val) 8282 v.AddArg(mem) 8283 return true 8284 } 8285 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) 
x:(MOVLstore [i-4] {s} p w mem)) 8286 // cond: x.Uses == 1 && clobber(x) 8287 // result: (MOVQstore [i-4] {s} p w mem) 8288 for { 8289 i := v.AuxInt 8290 s := v.Aux 8291 _ = v.Args[2] 8292 p := v.Args[0] 8293 v_1 := v.Args[1] 8294 if v_1.Op != OpAMD64SHRQconst { 8295 break 8296 } 8297 if v_1.AuxInt != 32 { 8298 break 8299 } 8300 w := v_1.Args[0] 8301 x := v.Args[2] 8302 if x.Op != OpAMD64MOVLstore { 8303 break 8304 } 8305 if x.AuxInt != i-4 { 8306 break 8307 } 8308 if x.Aux != s { 8309 break 8310 } 8311 _ = x.Args[2] 8312 if p != x.Args[0] { 8313 break 8314 } 8315 if w != x.Args[1] { 8316 break 8317 } 8318 mem := x.Args[2] 8319 if !(x.Uses == 1 && clobber(x)) { 8320 break 8321 } 8322 v.reset(OpAMD64MOVQstore) 8323 v.AuxInt = i - 4 8324 v.Aux = s 8325 v.AddArg(p) 8326 v.AddArg(w) 8327 v.AddArg(mem) 8328 return true 8329 } 8330 return false 8331 } 8332 func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { 8333 b := v.Block 8334 _ = b 8335 typ := &b.Func.Config.Types 8336 _ = typ 8337 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) 8338 // cond: x.Uses == 1 && clobber(x) 8339 // result: (MOVQstore [i-4] {s} p w0 mem) 8340 for { 8341 i := v.AuxInt 8342 s := v.Aux 8343 _ = v.Args[2] 8344 p := v.Args[0] 8345 v_1 := v.Args[1] 8346 if v_1.Op != OpAMD64SHRQconst { 8347 break 8348 } 8349 j := v_1.AuxInt 8350 w := v_1.Args[0] 8351 x := v.Args[2] 8352 if x.Op != OpAMD64MOVLstore { 8353 break 8354 } 8355 if x.AuxInt != i-4 { 8356 break 8357 } 8358 if x.Aux != s { 8359 break 8360 } 8361 _ = x.Args[2] 8362 if p != x.Args[0] { 8363 break 8364 } 8365 w0 := x.Args[1] 8366 if w0.Op != OpAMD64SHRQconst { 8367 break 8368 } 8369 if w0.AuxInt != j-32 { 8370 break 8371 } 8372 if w != w0.Args[0] { 8373 break 8374 } 8375 mem := x.Args[2] 8376 if !(x.Uses == 1 && clobber(x)) { 8377 break 8378 } 8379 v.reset(OpAMD64MOVQstore) 8380 v.AuxInt = i - 4 8381 v.Aux = s 8382 v.AddArg(p) 8383 v.AddArg(w0) 8384 v.AddArg(mem) 8385 return true 8386 } 8387 // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem)) 8388 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 8389 // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) 8390 for { 8391 i := v.AuxInt 8392 s := v.Aux 8393 _ = v.Args[2] 8394 p := v.Args[0] 8395 x1 := v.Args[1] 8396 if x1.Op != OpAMD64MOVLload { 8397 break 8398 } 8399 j := x1.AuxInt 8400 s2 := x1.Aux 8401 _ = x1.Args[1] 8402 p2 := x1.Args[0] 8403 mem := x1.Args[1] 8404 mem2 := v.Args[2] 8405 if mem2.Op != OpAMD64MOVLstore { 8406 break 8407 } 8408 if mem2.AuxInt != i-4 { 8409 break 8410 } 8411 if mem2.Aux != s { 8412 break 8413 } 8414 _ = mem2.Args[2] 8415 if p != mem2.Args[0] { 8416 break 8417 } 8418 x2 := mem2.Args[1] 8419 if x2.Op != OpAMD64MOVLload { 8420 break 8421 } 8422 if x2.AuxInt != j-4 { 8423 break 8424 } 8425 if x2.Aux != s2 { 8426 break 8427 } 8428 _ = x2.Args[1] 8429 if p2 != x2.Args[0] { 8430 break 8431 } 8432 if mem != x2.Args[1] { 8433 break 8434 } 8435 if mem != mem2.Args[2] { 8436 break 8437 } 8438 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 8439 break 8440 } 8441 v.reset(OpAMD64MOVQstore) 8442 v.AuxInt = i - 4 8443 v.Aux = s 8444 v.AddArg(p) 8445 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 8446 v0.AuxInt = j - 4 8447 v0.Aux = s2 8448 v0.AddArg(p2) 8449 v0.AddArg(mem) 8450 v.AddArg(v0) 8451 v.AddArg(mem) 8452 return true 8453 } 8454 
// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 8455 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 8456 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 8457 for { 8458 off1 := v.AuxInt 8459 sym1 := v.Aux 8460 _ = v.Args[2] 8461 v_0 := v.Args[0] 8462 if v_0.Op != OpAMD64LEAL { 8463 break 8464 } 8465 off2 := v_0.AuxInt 8466 sym2 := v_0.Aux 8467 base := v_0.Args[0] 8468 val := v.Args[1] 8469 mem := v.Args[2] 8470 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 8471 break 8472 } 8473 v.reset(OpAMD64MOVLstore) 8474 v.AuxInt = off1 + off2 8475 v.Aux = mergeSym(sym1, sym2) 8476 v.AddArg(base) 8477 v.AddArg(val) 8478 v.AddArg(mem) 8479 return true 8480 } 8481 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 8482 // cond: is32Bit(off1+off2) 8483 // result: (MOVLstore [off1+off2] {sym} ptr val mem) 8484 for { 8485 off1 := v.AuxInt 8486 sym := v.Aux 8487 _ = v.Args[2] 8488 v_0 := v.Args[0] 8489 if v_0.Op != OpAMD64ADDLconst { 8490 break 8491 } 8492 off2 := v_0.AuxInt 8493 ptr := v_0.Args[0] 8494 val := v.Args[1] 8495 mem := v.Args[2] 8496 if !(is32Bit(off1 + off2)) { 8497 break 8498 } 8499 v.reset(OpAMD64MOVLstore) 8500 v.AuxInt = off1 + off2 8501 v.Aux = sym 8502 v.AddArg(ptr) 8503 v.AddArg(val) 8504 v.AddArg(mem) 8505 return true 8506 } 8507 // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) 8508 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) 8509 // result: (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem) 8510 for { 8511 off := v.AuxInt 8512 sym := v.Aux 8513 _ = v.Args[2] 8514 ptr := v.Args[0] 8515 a := v.Args[1] 8516 if a.Op != OpAMD64ADDLconst { 8517 break 8518 } 8519 c := a.AuxInt 8520 l := a.Args[0] 8521 if l.Op != OpAMD64MOVLload { 8522 break 8523 } 8524 if l.AuxInt != off { 8525 break 8526 } 8527 if l.Aux != sym { 8528 break 8529 } 8530 _ = l.Args[1] 8531 ptr2 := l.Args[0] 8532 mem := l.Args[1] 8533 if mem != v.Args[2] { 8534 break 8535 } 8536 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) { 8537 break 8538 } 8539 v.reset(OpAMD64ADDLconstmem) 8540 v.AuxInt = makeValAndOff(c, off) 8541 v.Aux = sym 8542 v.AddArg(ptr) 8543 v.AddArg(mem) 8544 return true 8545 } 8546 // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) 8547 // cond: 8548 // result: (MOVSSstore [off] {sym} ptr val mem) 8549 for { 8550 off := v.AuxInt 8551 sym := v.Aux 8552 _ = v.Args[2] 8553 ptr := v.Args[0] 8554 v_1 := v.Args[1] 8555 if v_1.Op != OpAMD64MOVLf2i { 8556 break 8557 } 8558 val := v_1.Args[0] 8559 mem := v.Args[2] 8560 v.reset(OpAMD64MOVSSstore) 8561 v.AuxInt = off 8562 v.Aux = sym 8563 v.AddArg(ptr) 8564 v.AddArg(val) 8565 v.AddArg(mem) 8566 return true 8567 } 8568 return false 8569 } 8570 func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { 8571 b := v.Block 8572 _ = b 8573 typ := &b.Func.Config.Types 8574 _ = typ 8575 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 8576 // cond: ValAndOff(sc).canAdd(off) 8577 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 8578 for { 8579 sc := v.AuxInt 8580 s := v.Aux 8581 _ = v.Args[1] 8582 v_0 := v.Args[0] 8583 if v_0.Op != OpAMD64ADDQconst { 8584 break 8585 } 8586 off := v_0.AuxInt 8587 ptr := v_0.Args[0] 8588 mem := v.Args[1] 8589 if !(ValAndOff(sc).canAdd(off)) { 8590 break 8591 } 8592 v.reset(OpAMD64MOVLstoreconst) 8593 v.AuxInt = ValAndOff(sc).add(off) 8594 v.Aux = s 8595 v.AddArg(ptr) 8596 v.AddArg(mem) 8597 return true 8598 } 8599 // 
match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 8600 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 8601 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 8602 for { 8603 sc := v.AuxInt 8604 sym1 := v.Aux 8605 _ = v.Args[1] 8606 v_0 := v.Args[0] 8607 if v_0.Op != OpAMD64LEAQ { 8608 break 8609 } 8610 off := v_0.AuxInt 8611 sym2 := v_0.Aux 8612 ptr := v_0.Args[0] 8613 mem := v.Args[1] 8614 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 8615 break 8616 } 8617 v.reset(OpAMD64MOVLstoreconst) 8618 v.AuxInt = ValAndOff(sc).add(off) 8619 v.Aux = mergeSym(sym1, sym2) 8620 v.AddArg(ptr) 8621 v.AddArg(mem) 8622 return true 8623 } 8624 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 8625 // cond: canMergeSym(sym1, sym2) 8626 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8627 for { 8628 x := v.AuxInt 8629 sym1 := v.Aux 8630 _ = v.Args[1] 8631 v_0 := v.Args[0] 8632 if v_0.Op != OpAMD64LEAQ1 { 8633 break 8634 } 8635 off := v_0.AuxInt 8636 sym2 := v_0.Aux 8637 _ = v_0.Args[1] 8638 ptr := v_0.Args[0] 8639 idx := v_0.Args[1] 8640 mem := v.Args[1] 8641 if !(canMergeSym(sym1, sym2)) { 8642 break 8643 } 8644 v.reset(OpAMD64MOVLstoreconstidx1) 8645 v.AuxInt = ValAndOff(x).add(off) 8646 v.Aux = mergeSym(sym1, sym2) 8647 v.AddArg(ptr) 8648 v.AddArg(idx) 8649 v.AddArg(mem) 8650 return true 8651 } 8652 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) 8653 // cond: canMergeSym(sym1, sym2) 8654 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 8655 for { 8656 x := v.AuxInt 8657 sym1 := v.Aux 8658 _ = v.Args[1] 8659 v_0 := v.Args[0] 8660 if v_0.Op != OpAMD64LEAQ4 { 8661 break 8662 } 8663 off := v_0.AuxInt 8664 sym2 := v_0.Aux 8665 _ = v_0.Args[1] 8666 ptr := v_0.Args[0] 8667 idx := v_0.Args[1] 8668 mem := v.Args[1] 8669 if !(canMergeSym(sym1, sym2)) { 8670 break 8671 } 8672 v.reset(OpAMD64MOVLstoreconstidx4) 8673 v.AuxInt = ValAndOff(x).add(off) 8674 v.Aux = mergeSym(sym1, sym2) 8675 v.AddArg(ptr) 8676 v.AddArg(idx) 8677 v.AddArg(mem) 8678 return true 8679 } 8680 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) 8681 // cond: 8682 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) 8683 for { 8684 x := v.AuxInt 8685 sym := v.Aux 8686 _ = v.Args[1] 8687 v_0 := v.Args[0] 8688 if v_0.Op != OpAMD64ADDQ { 8689 break 8690 } 8691 _ = v_0.Args[1] 8692 ptr := v_0.Args[0] 8693 idx := v_0.Args[1] 8694 mem := v.Args[1] 8695 v.reset(OpAMD64MOVLstoreconstidx1) 8696 v.AuxInt = x 8697 v.Aux = sym 8698 v.AddArg(ptr) 8699 v.AddArg(idx) 8700 v.AddArg(mem) 8701 return true 8702 } 8703 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) 8704 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 8705 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 8706 for { 8707 c := v.AuxInt 8708 s := v.Aux 8709 _ = v.Args[1] 8710 p := v.Args[0] 8711 x := v.Args[1] 8712 if x.Op != OpAMD64MOVLstoreconst { 8713 break 8714 } 8715 a := x.AuxInt 8716 if x.Aux != s { 8717 break 8718 } 8719 _ = x.Args[1] 8720 if p != x.Args[0] { 8721 break 8722 } 8723 mem := x.Args[1] 8724 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 8725 break 8726 } 8727 v.reset(OpAMD64MOVQstore) 8728 v.AuxInt = ValAndOff(a).Off() 8729 v.Aux = s 8730 v.AddArg(p) 8731 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 8732 
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 8733 v.AddArg(v0) 8734 v.AddArg(mem) 8735 return true 8736 } 8737 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 8738 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 8739 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 8740 for { 8741 sc := v.AuxInt 8742 sym1 := v.Aux 8743 _ = v.Args[1] 8744 v_0 := v.Args[0] 8745 if v_0.Op != OpAMD64LEAL { 8746 break 8747 } 8748 off := v_0.AuxInt 8749 sym2 := v_0.Aux 8750 ptr := v_0.Args[0] 8751 mem := v.Args[1] 8752 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 8753 break 8754 } 8755 v.reset(OpAMD64MOVLstoreconst) 8756 v.AuxInt = ValAndOff(sc).add(off) 8757 v.Aux = mergeSym(sym1, sym2) 8758 v.AddArg(ptr) 8759 v.AddArg(mem) 8760 return true 8761 } 8762 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 8763 // cond: ValAndOff(sc).canAdd(off) 8764 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 8765 for { 8766 sc := v.AuxInt 8767 s := v.Aux 8768 _ = v.Args[1] 8769 v_0 := v.Args[0] 8770 if v_0.Op != OpAMD64ADDLconst { 8771 break 8772 } 8773 off := v_0.AuxInt 8774 ptr := v_0.Args[0] 8775 mem := v.Args[1] 8776 if !(ValAndOff(sc).canAdd(off)) { 8777 break 8778 } 8779 v.reset(OpAMD64MOVLstoreconst) 8780 v.AuxInt = ValAndOff(sc).add(off) 8781 v.Aux = s 8782 v.AddArg(ptr) 8783 v.AddArg(mem) 8784 return true 8785 } 8786 return false 8787 } 8788 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { 8789 b := v.Block 8790 _ = b 8791 typ := &b.Func.Config.Types 8792 _ = typ 8793 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 8794 // cond: 8795 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) 8796 for { 8797 c := v.AuxInt 8798 sym := v.Aux 8799 _ = v.Args[2] 8800 ptr := v.Args[0] 8801 v_1 := v.Args[1] 8802 if v_1.Op != OpAMD64SHLQconst { 8803 break 8804 } 8805 if v_1.AuxInt != 2 { 8806 break 8807 } 8808 idx := v_1.Args[0] 8809 mem := v.Args[2] 8810 v.reset(OpAMD64MOVLstoreconstidx4) 8811 v.AuxInt = c 8812 v.Aux = sym 8813 v.AddArg(ptr) 8814 v.AddArg(idx) 8815 v.AddArg(mem) 8816 return true 8817 } 8818 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 8819 // cond: ValAndOff(x).canAdd(c) 8820 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8821 for { 8822 x := v.AuxInt 8823 sym := v.Aux 8824 _ = v.Args[2] 8825 v_0 := v.Args[0] 8826 if v_0.Op != OpAMD64ADDQconst { 8827 break 8828 } 8829 c := v_0.AuxInt 8830 ptr := v_0.Args[0] 8831 idx := v.Args[1] 8832 mem := v.Args[2] 8833 if !(ValAndOff(x).canAdd(c)) { 8834 break 8835 } 8836 v.reset(OpAMD64MOVLstoreconstidx1) 8837 v.AuxInt = ValAndOff(x).add(c) 8838 v.Aux = sym 8839 v.AddArg(ptr) 8840 v.AddArg(idx) 8841 v.AddArg(mem) 8842 return true 8843 } 8844 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 8845 // cond: ValAndOff(x).canAdd(c) 8846 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8847 for { 8848 x := v.AuxInt 8849 sym := v.Aux 8850 _ = v.Args[2] 8851 ptr := v.Args[0] 8852 v_1 := v.Args[1] 8853 if v_1.Op != OpAMD64ADDQconst { 8854 break 8855 } 8856 c := v_1.AuxInt 8857 idx := v_1.Args[0] 8858 mem := v.Args[2] 8859 if !(ValAndOff(x).canAdd(c)) { 8860 break 8861 } 8862 v.reset(OpAMD64MOVLstoreconstidx1) 8863 v.AuxInt = ValAndOff(x).add(c) 8864 v.Aux = sym 8865 v.AddArg(ptr) 8866 v.AddArg(idx) 8867 v.AddArg(mem) 8868 return true 8869 } 8870 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i 
mem)) 8871 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 8872 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 8873 for { 8874 c := v.AuxInt 8875 s := v.Aux 8876 _ = v.Args[2] 8877 p := v.Args[0] 8878 i := v.Args[1] 8879 x := v.Args[2] 8880 if x.Op != OpAMD64MOVLstoreconstidx1 { 8881 break 8882 } 8883 a := x.AuxInt 8884 if x.Aux != s { 8885 break 8886 } 8887 _ = x.Args[2] 8888 if p != x.Args[0] { 8889 break 8890 } 8891 if i != x.Args[1] { 8892 break 8893 } 8894 mem := x.Args[2] 8895 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 8896 break 8897 } 8898 v.reset(OpAMD64MOVQstoreidx1) 8899 v.AuxInt = ValAndOff(a).Off() 8900 v.Aux = s 8901 v.AddArg(p) 8902 v.AddArg(i) 8903 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 8904 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 8905 v.AddArg(v0) 8906 v.AddArg(mem) 8907 return true 8908 } 8909 return false 8910 } 8911 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { 8912 b := v.Block 8913 _ = b 8914 typ := &b.Func.Config.Types 8915 _ = typ 8916 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) 8917 // cond: ValAndOff(x).canAdd(c) 8918 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) 8919 for { 8920 x := v.AuxInt 8921 sym := v.Aux 8922 _ = v.Args[2] 8923 v_0 := v.Args[0] 8924 if v_0.Op != OpAMD64ADDQconst { 8925 break 8926 } 8927 c := v_0.AuxInt 8928 ptr := v_0.Args[0] 8929 idx := v.Args[1] 8930 mem := v.Args[2] 8931 if !(ValAndOff(x).canAdd(c)) { 8932 break 8933 } 8934 v.reset(OpAMD64MOVLstoreconstidx4) 8935 v.AuxInt = ValAndOff(x).add(c) 8936 v.Aux = sym 8937 v.AddArg(ptr) 8938 v.AddArg(idx) 8939 v.AddArg(mem) 8940 return true 8941 } 8942 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) 8943 // cond: ValAndOff(x).canAdd(4*c) 8944 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) 8945 for { 8946 x := v.AuxInt 8947 sym := v.Aux 8948 _ = v.Args[2] 8949 ptr := v.Args[0] 8950 v_1 := v.Args[1] 8951 if v_1.Op != OpAMD64ADDQconst { 8952 break 8953 } 8954 c := v_1.AuxInt 8955 idx := v_1.Args[0] 8956 mem := v.Args[2] 8957 if !(ValAndOff(x).canAdd(4 * c)) { 8958 break 8959 } 8960 v.reset(OpAMD64MOVLstoreconstidx4) 8961 v.AuxInt = ValAndOff(x).add(4 * c) 8962 v.Aux = sym 8963 v.AddArg(ptr) 8964 v.AddArg(idx) 8965 v.AddArg(mem) 8966 return true 8967 } 8968 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) 8969 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) 8970 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) 8971 for { 8972 c := v.AuxInt 8973 s := v.Aux 8974 _ = v.Args[2] 8975 p := v.Args[0] 8976 i := v.Args[1] 8977 x := v.Args[2] 8978 if x.Op != OpAMD64MOVLstoreconstidx4 { 8979 break 8980 } 8981 a := x.AuxInt 8982 if x.Aux != s { 8983 break 8984 } 8985 _ = x.Args[2] 8986 if p != x.Args[0] { 8987 break 8988 } 8989 if i != x.Args[1] { 8990 break 8991 } 8992 mem := x.Args[2] 8993 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { 8994 break 8995 } 8996 v.reset(OpAMD64MOVQstoreidx1) 8997 v.AuxInt = ValAndOff(a).Off() 8998 v.Aux = s 8999 v.AddArg(p) 9000 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 9001 v0.AuxInt = 2 9002 v0.AddArg(i) 9003 v.AddArg(v0) 9004 v1 := b.NewValue0(v.Pos, 
OpAMD64MOVQconst, typ.UInt64) 9005 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 9006 v.AddArg(v1) 9007 v.AddArg(mem) 9008 return true 9009 } 9010 return false 9011 } 9012 func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { 9013 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) 9014 // cond: 9015 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) 9016 for { 9017 c := v.AuxInt 9018 sym := v.Aux 9019 _ = v.Args[3] 9020 ptr := v.Args[0] 9021 v_1 := v.Args[1] 9022 if v_1.Op != OpAMD64SHLQconst { 9023 break 9024 } 9025 if v_1.AuxInt != 2 { 9026 break 9027 } 9028 idx := v_1.Args[0] 9029 val := v.Args[2] 9030 mem := v.Args[3] 9031 v.reset(OpAMD64MOVLstoreidx4) 9032 v.AuxInt = c 9033 v.Aux = sym 9034 v.AddArg(ptr) 9035 v.AddArg(idx) 9036 v.AddArg(val) 9037 v.AddArg(mem) 9038 return true 9039 } 9040 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) 9041 // cond: 9042 // result: (MOVLstoreidx8 [c] {sym} ptr idx val mem) 9043 for { 9044 c := v.AuxInt 9045 sym := v.Aux 9046 _ = v.Args[3] 9047 ptr := v.Args[0] 9048 v_1 := v.Args[1] 9049 if v_1.Op != OpAMD64SHLQconst { 9050 break 9051 } 9052 if v_1.AuxInt != 3 { 9053 break 9054 } 9055 idx := v_1.Args[0] 9056 val := v.Args[2] 9057 mem := v.Args[3] 9058 v.reset(OpAMD64MOVLstoreidx8) 9059 v.AuxInt = c 9060 v.Aux = sym 9061 v.AddArg(ptr) 9062 v.AddArg(idx) 9063 v.AddArg(val) 9064 v.AddArg(mem) 9065 return true 9066 } 9067 // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9068 // cond: is32Bit(c+d) 9069 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 9070 for { 9071 c := v.AuxInt 9072 sym := v.Aux 9073 _ = v.Args[3] 9074 v_0 := v.Args[0] 9075 if v_0.Op != OpAMD64ADDQconst { 9076 break 9077 } 9078 d := v_0.AuxInt 9079 ptr := v_0.Args[0] 9080 idx := v.Args[1] 9081 val := v.Args[2] 9082 mem := v.Args[3] 9083 if !(is32Bit(c + d)) { 9084 break 9085 } 9086 v.reset(OpAMD64MOVLstoreidx1) 9087 v.AuxInt = c + d 9088 v.Aux = sym 9089 v.AddArg(ptr) 9090 v.AddArg(idx) 9091 v.AddArg(val) 9092 v.AddArg(mem) 9093 return true 9094 } 9095 // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9096 // cond: is32Bit(c+d) 9097 // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) 9098 for { 9099 c := v.AuxInt 9100 sym := v.Aux 9101 _ = v.Args[3] 9102 ptr := v.Args[0] 9103 v_1 := v.Args[1] 9104 if v_1.Op != OpAMD64ADDQconst { 9105 break 9106 } 9107 d := v_1.AuxInt 9108 idx := v_1.Args[0] 9109 val := v.Args[2] 9110 mem := v.Args[3] 9111 if !(is32Bit(c + d)) { 9112 break 9113 } 9114 v.reset(OpAMD64MOVLstoreidx1) 9115 v.AuxInt = c + d 9116 v.Aux = sym 9117 v.AddArg(ptr) 9118 v.AddArg(idx) 9119 v.AddArg(val) 9120 v.AddArg(mem) 9121 return true 9122 } 9123 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) 9124 // cond: x.Uses == 1 && clobber(x) 9125 // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) 9126 for { 9127 i := v.AuxInt 9128 s := v.Aux 9129 _ = v.Args[3] 9130 p := v.Args[0] 9131 idx := v.Args[1] 9132 v_2 := v.Args[2] 9133 if v_2.Op != OpAMD64SHRQconst { 9134 break 9135 } 9136 if v_2.AuxInt != 32 { 9137 break 9138 } 9139 w := v_2.Args[0] 9140 x := v.Args[3] 9141 if x.Op != OpAMD64MOVLstoreidx1 { 9142 break 9143 } 9144 if x.AuxInt != i-4 { 9145 break 9146 } 9147 if x.Aux != s { 9148 break 9149 } 9150 _ = x.Args[3] 9151 if p != x.Args[0] { 9152 break 9153 } 9154 if idx != x.Args[1] { 9155 break 9156 } 9157 if w != x.Args[2] { 9158 break 9159 } 9160 mem := x.Args[3] 9161 if !(x.Uses == 1 && clobber(x)) { 9162 break 9163 } 9164 
v.reset(OpAMD64MOVQstoreidx1) 9165 v.AuxInt = i - 4 9166 v.Aux = s 9167 v.AddArg(p) 9168 v.AddArg(idx) 9169 v.AddArg(w) 9170 v.AddArg(mem) 9171 return true 9172 } 9173 // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 9174 // cond: x.Uses == 1 && clobber(x) 9175 // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) 9176 for { 9177 i := v.AuxInt 9178 s := v.Aux 9179 _ = v.Args[3] 9180 p := v.Args[0] 9181 idx := v.Args[1] 9182 v_2 := v.Args[2] 9183 if v_2.Op != OpAMD64SHRQconst { 9184 break 9185 } 9186 j := v_2.AuxInt 9187 w := v_2.Args[0] 9188 x := v.Args[3] 9189 if x.Op != OpAMD64MOVLstoreidx1 { 9190 break 9191 } 9192 if x.AuxInt != i-4 { 9193 break 9194 } 9195 if x.Aux != s { 9196 break 9197 } 9198 _ = x.Args[3] 9199 if p != x.Args[0] { 9200 break 9201 } 9202 if idx != x.Args[1] { 9203 break 9204 } 9205 w0 := x.Args[2] 9206 if w0.Op != OpAMD64SHRQconst { 9207 break 9208 } 9209 if w0.AuxInt != j-32 { 9210 break 9211 } 9212 if w != w0.Args[0] { 9213 break 9214 } 9215 mem := x.Args[3] 9216 if !(x.Uses == 1 && clobber(x)) { 9217 break 9218 } 9219 v.reset(OpAMD64MOVQstoreidx1) 9220 v.AuxInt = i - 4 9221 v.Aux = s 9222 v.AddArg(p) 9223 v.AddArg(idx) 9224 v.AddArg(w0) 9225 v.AddArg(mem) 9226 return true 9227 } 9228 return false 9229 } 9230 func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { 9231 b := v.Block 9232 _ = b 9233 // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9234 // cond: is32Bit(c+d) 9235 // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) 9236 for { 9237 c := v.AuxInt 9238 sym := v.Aux 9239 _ = v.Args[3] 9240 v_0 := v.Args[0] 9241 if v_0.Op != OpAMD64ADDQconst { 9242 break 9243 } 9244 d := v_0.AuxInt 9245 ptr := v_0.Args[0] 9246 idx := v.Args[1] 9247 val := v.Args[2] 9248 mem := v.Args[3] 9249 if !(is32Bit(c + d)) { 9250 break 9251 } 9252 v.reset(OpAMD64MOVLstoreidx4) 9253 v.AuxInt = c + d 9254 v.Aux = sym 9255 v.AddArg(ptr) 9256 v.AddArg(idx) 9257 v.AddArg(val) 9258 v.AddArg(mem) 9259 return true 9260 } 9261 // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9262 // cond: is32Bit(c+4*d) 9263 // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) 9264 for { 9265 c := v.AuxInt 9266 sym := v.Aux 9267 _ = v.Args[3] 9268 ptr := v.Args[0] 9269 v_1 := v.Args[1] 9270 if v_1.Op != OpAMD64ADDQconst { 9271 break 9272 } 9273 d := v_1.AuxInt 9274 idx := v_1.Args[0] 9275 val := v.Args[2] 9276 mem := v.Args[3] 9277 if !(is32Bit(c + 4*d)) { 9278 break 9279 } 9280 v.reset(OpAMD64MOVLstoreidx4) 9281 v.AuxInt = c + 4*d 9282 v.Aux = sym 9283 v.AddArg(ptr) 9284 v.AddArg(idx) 9285 v.AddArg(val) 9286 v.AddArg(mem) 9287 return true 9288 } 9289 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) 9290 // cond: x.Uses == 1 && clobber(x) 9291 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem) 9292 for { 9293 i := v.AuxInt 9294 s := v.Aux 9295 _ = v.Args[3] 9296 p := v.Args[0] 9297 idx := v.Args[1] 9298 v_2 := v.Args[2] 9299 if v_2.Op != OpAMD64SHRQconst { 9300 break 9301 } 9302 if v_2.AuxInt != 32 { 9303 break 9304 } 9305 w := v_2.Args[0] 9306 x := v.Args[3] 9307 if x.Op != OpAMD64MOVLstoreidx4 { 9308 break 9309 } 9310 if x.AuxInt != i-4 { 9311 break 9312 } 9313 if x.Aux != s { 9314 break 9315 } 9316 _ = x.Args[3] 9317 if p != x.Args[0] { 9318 break 9319 } 9320 if idx != x.Args[1] { 9321 break 9322 } 9323 if w != x.Args[2] { 9324 break 9325 } 9326 mem := x.Args[3] 9327 if !(x.Uses == 1 && clobber(x)) { 9328 break 9329 } 
9330 v.reset(OpAMD64MOVQstoreidx1) 9331 v.AuxInt = i - 4 9332 v.Aux = s 9333 v.AddArg(p) 9334 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 9335 v0.AuxInt = 2 9336 v0.AddArg(idx) 9337 v.AddArg(v0) 9338 v.AddArg(w) 9339 v.AddArg(mem) 9340 return true 9341 } 9342 // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) 9343 // cond: x.Uses == 1 && clobber(x) 9344 // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem) 9345 for { 9346 i := v.AuxInt 9347 s := v.Aux 9348 _ = v.Args[3] 9349 p := v.Args[0] 9350 idx := v.Args[1] 9351 v_2 := v.Args[2] 9352 if v_2.Op != OpAMD64SHRQconst { 9353 break 9354 } 9355 j := v_2.AuxInt 9356 w := v_2.Args[0] 9357 x := v.Args[3] 9358 if x.Op != OpAMD64MOVLstoreidx4 { 9359 break 9360 } 9361 if x.AuxInt != i-4 { 9362 break 9363 } 9364 if x.Aux != s { 9365 break 9366 } 9367 _ = x.Args[3] 9368 if p != x.Args[0] { 9369 break 9370 } 9371 if idx != x.Args[1] { 9372 break 9373 } 9374 w0 := x.Args[2] 9375 if w0.Op != OpAMD64SHRQconst { 9376 break 9377 } 9378 if w0.AuxInt != j-32 { 9379 break 9380 } 9381 if w != w0.Args[0] { 9382 break 9383 } 9384 mem := x.Args[3] 9385 if !(x.Uses == 1 && clobber(x)) { 9386 break 9387 } 9388 v.reset(OpAMD64MOVQstoreidx1) 9389 v.AuxInt = i - 4 9390 v.Aux = s 9391 v.AddArg(p) 9392 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 9393 v0.AuxInt = 2 9394 v0.AddArg(idx) 9395 v.AddArg(v0) 9396 v.AddArg(w0) 9397 v.AddArg(mem) 9398 return true 9399 } 9400 return false 9401 } 9402 func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool { 9403 // match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) 9404 // cond: is32Bit(c+d) 9405 // result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem) 9406 for { 9407 c := v.AuxInt 9408 sym := v.Aux 9409 _ = v.Args[3] 9410 v_0 := v.Args[0] 9411 if v_0.Op != OpAMD64ADDQconst { 9412 break 9413 } 9414 d := v_0.AuxInt 9415 ptr := v_0.Args[0] 9416 idx := v.Args[1] 9417 val := v.Args[2] 9418 mem := v.Args[3] 9419 if !(is32Bit(c + d)) { 9420 break 9421 } 9422 v.reset(OpAMD64MOVLstoreidx8) 9423 v.AuxInt = c + d 9424 v.Aux = sym 9425 v.AddArg(ptr) 9426 v.AddArg(idx) 9427 v.AddArg(val) 9428 v.AddArg(mem) 9429 return true 9430 } 9431 // match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) 9432 // cond: is32Bit(c+8*d) 9433 // result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem) 9434 for { 9435 c := v.AuxInt 9436 sym := v.Aux 9437 _ = v.Args[3] 9438 ptr := v.Args[0] 9439 v_1 := v.Args[1] 9440 if v_1.Op != OpAMD64ADDQconst { 9441 break 9442 } 9443 d := v_1.AuxInt 9444 idx := v_1.Args[0] 9445 val := v.Args[2] 9446 mem := v.Args[3] 9447 if !(is32Bit(c + 8*d)) { 9448 break 9449 } 9450 v.reset(OpAMD64MOVLstoreidx8) 9451 v.AuxInt = c + 8*d 9452 v.Aux = sym 9453 v.AddArg(ptr) 9454 v.AddArg(idx) 9455 v.AddArg(val) 9456 v.AddArg(mem) 9457 return true 9458 } 9459 return false 9460 } 9461 func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { 9462 // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) 9463 // cond: is32Bit(off1+off2) 9464 // result: (MOVOload [off1+off2] {sym} ptr mem) 9465 for { 9466 off1 := v.AuxInt 9467 sym := v.Aux 9468 _ = v.Args[1] 9469 v_0 := v.Args[0] 9470 if v_0.Op != OpAMD64ADDQconst { 9471 break 9472 } 9473 off2 := v_0.AuxInt 9474 ptr := v_0.Args[0] 9475 mem := v.Args[1] 9476 if !(is32Bit(off1 + off2)) { 9477 break 9478 } 9479 v.reset(OpAMD64MOVOload) 9480 v.AuxInt = off1 + off2 9481 v.Aux = sym 9482 v.AddArg(ptr) 9483 v.AddArg(mem) 9484 return true 9485 } 9486 // match: 
(MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9487 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9488 // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9489 for { 9490 off1 := v.AuxInt 9491 sym1 := v.Aux 9492 _ = v.Args[1] 9493 v_0 := v.Args[0] 9494 if v_0.Op != OpAMD64LEAQ { 9495 break 9496 } 9497 off2 := v_0.AuxInt 9498 sym2 := v_0.Aux 9499 base := v_0.Args[0] 9500 mem := v.Args[1] 9501 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9502 break 9503 } 9504 v.reset(OpAMD64MOVOload) 9505 v.AuxInt = off1 + off2 9506 v.Aux = mergeSym(sym1, sym2) 9507 v.AddArg(base) 9508 v.AddArg(mem) 9509 return true 9510 } 9511 return false 9512 } 9513 func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool { 9514 // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 9515 // cond: is32Bit(off1+off2) 9516 // result: (MOVOstore [off1+off2] {sym} ptr val mem) 9517 for { 9518 off1 := v.AuxInt 9519 sym := v.Aux 9520 _ = v.Args[2] 9521 v_0 := v.Args[0] 9522 if v_0.Op != OpAMD64ADDQconst { 9523 break 9524 } 9525 off2 := v_0.AuxInt 9526 ptr := v_0.Args[0] 9527 val := v.Args[1] 9528 mem := v.Args[2] 9529 if !(is32Bit(off1 + off2)) { 9530 break 9531 } 9532 v.reset(OpAMD64MOVOstore) 9533 v.AuxInt = off1 + off2 9534 v.Aux = sym 9535 v.AddArg(ptr) 9536 v.AddArg(val) 9537 v.AddArg(mem) 9538 return true 9539 } 9540 // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 9541 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9542 // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 9543 for { 9544 off1 := v.AuxInt 9545 sym1 := v.Aux 9546 _ = v.Args[2] 9547 v_0 := v.Args[0] 9548 if v_0.Op != OpAMD64LEAQ { 9549 break 9550 } 9551 off2 := v_0.AuxInt 9552 sym2 := v_0.Aux 9553 base := v_0.Args[0] 9554 val := v.Args[1] 9555 mem := v.Args[2] 9556 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9557 break 9558 } 9559 v.reset(OpAMD64MOVOstore) 9560 v.AuxInt = off1 + off2 9561 v.Aux = mergeSym(sym1, sym2) 9562 v.AddArg(base) 9563 v.AddArg(val) 9564 v.AddArg(mem) 9565 return true 9566 } 9567 return false 9568 } 9569 func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { 9570 // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 9571 // cond: is32Bit(off1+off2) 9572 // result: (MOVQatomicload [off1+off2] {sym} ptr mem) 9573 for { 9574 off1 := v.AuxInt 9575 sym := v.Aux 9576 _ = v.Args[1] 9577 v_0 := v.Args[0] 9578 if v_0.Op != OpAMD64ADDQconst { 9579 break 9580 } 9581 off2 := v_0.AuxInt 9582 ptr := v_0.Args[0] 9583 mem := v.Args[1] 9584 if !(is32Bit(off1 + off2)) { 9585 break 9586 } 9587 v.reset(OpAMD64MOVQatomicload) 9588 v.AuxInt = off1 + off2 9589 v.Aux = sym 9590 v.AddArg(ptr) 9591 v.AddArg(mem) 9592 return true 9593 } 9594 // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 9595 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9596 // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 9597 for { 9598 off1 := v.AuxInt 9599 sym1 := v.Aux 9600 _ = v.Args[1] 9601 v_0 := v.Args[0] 9602 if v_0.Op != OpAMD64LEAQ { 9603 break 9604 } 9605 off2 := v_0.AuxInt 9606 sym2 := v_0.Aux 9607 ptr := v_0.Args[0] 9608 mem := v.Args[1] 9609 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9610 break 9611 } 9612 v.reset(OpAMD64MOVQatomicload) 9613 v.AuxInt = off1 + off2 9614 v.Aux = mergeSym(sym1, sym2) 9615 v.AddArg(ptr) 9616 v.AddArg(mem) 9617 return true 9618 } 9619 return false 9620 } 9621 func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool { 9622 b := v.Block 9623 _ = b 9624 // match: 
(MOVQf2i <t> (Arg [off] {sym})) 9625 // cond: 9626 // result: @b.Func.Entry (Arg <t> [off] {sym}) 9627 for { 9628 t := v.Type 9629 v_0 := v.Args[0] 9630 if v_0.Op != OpArg { 9631 break 9632 } 9633 off := v_0.AuxInt 9634 sym := v_0.Aux 9635 b = b.Func.Entry 9636 v0 := b.NewValue0(v.Pos, OpArg, t) 9637 v.reset(OpCopy) 9638 v.AddArg(v0) 9639 v0.AuxInt = off 9640 v0.Aux = sym 9641 return true 9642 } 9643 return false 9644 } 9645 func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { 9646 b := v.Block 9647 _ = b 9648 // match: (MOVQi2f <t> (Arg [off] {sym})) 9649 // cond: 9650 // result: @b.Func.Entry (Arg <t> [off] {sym}) 9651 for { 9652 t := v.Type 9653 v_0 := v.Args[0] 9654 if v_0.Op != OpArg { 9655 break 9656 } 9657 off := v_0.AuxInt 9658 sym := v_0.Aux 9659 b = b.Func.Entry 9660 v0 := b.NewValue0(v.Pos, OpArg, t) 9661 v.reset(OpCopy) 9662 v.AddArg(v0) 9663 v0.AuxInt = off 9664 v0.Aux = sym 9665 return true 9666 } 9667 return false 9668 } 9669 func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { 9670 // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) 9671 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 9672 // result: x 9673 for { 9674 off := v.AuxInt 9675 sym := v.Aux 9676 _ = v.Args[1] 9677 ptr := v.Args[0] 9678 v_1 := v.Args[1] 9679 if v_1.Op != OpAMD64MOVQstore { 9680 break 9681 } 9682 off2 := v_1.AuxInt 9683 sym2 := v_1.Aux 9684 _ = v_1.Args[2] 9685 ptr2 := v_1.Args[0] 9686 x := v_1.Args[1] 9687 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 9688 break 9689 } 9690 v.reset(OpCopy) 9691 v.Type = x.Type 9692 v.AddArg(x) 9693 return true 9694 } 9695 // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) 9696 // cond: is32Bit(off1+off2) 9697 // result: (MOVQload [off1+off2] {sym} ptr mem) 9698 for { 9699 off1 := v.AuxInt 9700 sym := v.Aux 9701 _ = v.Args[1] 9702 v_0 := v.Args[0] 9703 if v_0.Op != OpAMD64ADDQconst { 9704 break 9705 } 9706 off2 := v_0.AuxInt 9707 ptr := v_0.Args[0] 9708 mem := v.Args[1] 9709 if !(is32Bit(off1 + off2)) { 9710 break 9711 } 9712 v.reset(OpAMD64MOVQload) 9713 v.AuxInt = off1 + off2 9714 v.Aux = sym 9715 v.AddArg(ptr) 9716 v.AddArg(mem) 9717 return true 9718 } 9719 // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 9720 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9721 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9722 for { 9723 off1 := v.AuxInt 9724 sym1 := v.Aux 9725 _ = v.Args[1] 9726 v_0 := v.Args[0] 9727 if v_0.Op != OpAMD64LEAQ { 9728 break 9729 } 9730 off2 := v_0.AuxInt 9731 sym2 := v_0.Aux 9732 base := v_0.Args[0] 9733 mem := v.Args[1] 9734 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9735 break 9736 } 9737 v.reset(OpAMD64MOVQload) 9738 v.AuxInt = off1 + off2 9739 v.Aux = mergeSym(sym1, sym2) 9740 v.AddArg(base) 9741 v.AddArg(mem) 9742 return true 9743 } 9744 // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 9745 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9746 // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9747 for { 9748 off1 := v.AuxInt 9749 sym1 := v.Aux 9750 _ = v.Args[1] 9751 v_0 := v.Args[0] 9752 if v_0.Op != OpAMD64LEAQ1 { 9753 break 9754 } 9755 off2 := v_0.AuxInt 9756 sym2 := v_0.Aux 9757 _ = v_0.Args[1] 9758 ptr := v_0.Args[0] 9759 idx := v_0.Args[1] 9760 mem := v.Args[1] 9761 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9762 break 9763 } 9764 v.reset(OpAMD64MOVQloadidx1) 9765 v.AuxInt = off1 + off2 9766 v.Aux = mergeSym(sym1, sym2) 9767 v.AddArg(ptr) 9768 v.AddArg(idx) 9769 
v.AddArg(mem) 9770 return true 9771 } 9772 // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 9773 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 9774 // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 9775 for { 9776 off1 := v.AuxInt 9777 sym1 := v.Aux 9778 _ = v.Args[1] 9779 v_0 := v.Args[0] 9780 if v_0.Op != OpAMD64LEAQ8 { 9781 break 9782 } 9783 off2 := v_0.AuxInt 9784 sym2 := v_0.Aux 9785 _ = v_0.Args[1] 9786 ptr := v_0.Args[0] 9787 idx := v_0.Args[1] 9788 mem := v.Args[1] 9789 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 9790 break 9791 } 9792 v.reset(OpAMD64MOVQloadidx8) 9793 v.AuxInt = off1 + off2 9794 v.Aux = mergeSym(sym1, sym2) 9795 v.AddArg(ptr) 9796 v.AddArg(idx) 9797 v.AddArg(mem) 9798 return true 9799 } 9800 // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) 9801 // cond: ptr.Op != OpSB 9802 // result: (MOVQloadidx1 [off] {sym} ptr idx mem) 9803 for { 9804 off := v.AuxInt 9805 sym := v.Aux 9806 _ = v.Args[1] 9807 v_0 := v.Args[0] 9808 if v_0.Op != OpAMD64ADDQ { 9809 break 9810 } 9811 _ = v_0.Args[1] 9812 ptr := v_0.Args[0] 9813 idx := v_0.Args[1] 9814 mem := v.Args[1] 9815 if !(ptr.Op != OpSB) { 9816 break 9817 } 9818 v.reset(OpAMD64MOVQloadidx1) 9819 v.AuxInt = off 9820 v.Aux = sym 9821 v.AddArg(ptr) 9822 v.AddArg(idx) 9823 v.AddArg(mem) 9824 return true 9825 } 9826 // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 9827 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 9828 // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) 9829 for { 9830 off1 := v.AuxInt 9831 sym1 := v.Aux 9832 _ = v.Args[1] 9833 v_0 := v.Args[0] 9834 if v_0.Op != OpAMD64LEAL { 9835 break 9836 } 9837 off2 := v_0.AuxInt 9838 sym2 := v_0.Aux 9839 base := v_0.Args[0] 9840 mem := v.Args[1] 9841 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 9842 break 9843 } 9844 v.reset(OpAMD64MOVQload) 9845 v.AuxInt = off1 + off2 9846 v.Aux = mergeSym(sym1, sym2) 9847 v.AddArg(base) 9848 v.AddArg(mem) 9849 return true 9850 } 9851 // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) 9852 // cond: is32Bit(off1+off2) 9853 // result: (MOVQload [off1+off2] {sym} ptr mem) 9854 for { 9855 off1 := v.AuxInt 9856 sym := v.Aux 9857 _ = v.Args[1] 9858 v_0 := v.Args[0] 9859 if v_0.Op != OpAMD64ADDLconst { 9860 break 9861 } 9862 off2 := v_0.AuxInt 9863 ptr := v_0.Args[0] 9864 mem := v.Args[1] 9865 if !(is32Bit(off1 + off2)) { 9866 break 9867 } 9868 v.reset(OpAMD64MOVQload) 9869 v.AuxInt = off1 + off2 9870 v.Aux = sym 9871 v.AddArg(ptr) 9872 v.AddArg(mem) 9873 return true 9874 } 9875 // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) 9876 // cond: 9877 // result: (MOVQf2i val) 9878 for { 9879 off := v.AuxInt 9880 sym := v.Aux 9881 _ = v.Args[1] 9882 ptr := v.Args[0] 9883 v_1 := v.Args[1] 9884 if v_1.Op != OpAMD64MOVSDstore { 9885 break 9886 } 9887 if v_1.AuxInt != off { 9888 break 9889 } 9890 if v_1.Aux != sym { 9891 break 9892 } 9893 _ = v_1.Args[2] 9894 if ptr != v_1.Args[0] { 9895 break 9896 } 9897 val := v_1.Args[1] 9898 v.reset(OpAMD64MOVQf2i) 9899 v.AddArg(val) 9900 return true 9901 } 9902 return false 9903 } 9904 func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { 9905 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 9906 // cond: 9907 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 9908 for { 9909 c := v.AuxInt 9910 sym := v.Aux 9911 _ = v.Args[2] 9912 ptr := v.Args[0] 9913 v_1 := v.Args[1] 9914 if v_1.Op != OpAMD64SHLQconst { 9915 break 9916 } 9917 if v_1.AuxInt != 3 { 9918 break 
9919 } 9920 idx := v_1.Args[0] 9921 mem := v.Args[2] 9922 v.reset(OpAMD64MOVQloadidx8) 9923 v.AuxInt = c 9924 v.Aux = sym 9925 v.AddArg(ptr) 9926 v.AddArg(idx) 9927 v.AddArg(mem) 9928 return true 9929 } 9930 // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 9931 // cond: 9932 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 9933 for { 9934 c := v.AuxInt 9935 sym := v.Aux 9936 _ = v.Args[2] 9937 v_0 := v.Args[0] 9938 if v_0.Op != OpAMD64SHLQconst { 9939 break 9940 } 9941 if v_0.AuxInt != 3 { 9942 break 9943 } 9944 idx := v_0.Args[0] 9945 ptr := v.Args[1] 9946 mem := v.Args[2] 9947 v.reset(OpAMD64MOVQloadidx8) 9948 v.AuxInt = c 9949 v.Aux = sym 9950 v.AddArg(ptr) 9951 v.AddArg(idx) 9952 v.AddArg(mem) 9953 return true 9954 } 9955 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9956 // cond: is32Bit(c+d) 9957 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 9958 for { 9959 c := v.AuxInt 9960 sym := v.Aux 9961 _ = v.Args[2] 9962 v_0 := v.Args[0] 9963 if v_0.Op != OpAMD64ADDQconst { 9964 break 9965 } 9966 d := v_0.AuxInt 9967 ptr := v_0.Args[0] 9968 idx := v.Args[1] 9969 mem := v.Args[2] 9970 if !(is32Bit(c + d)) { 9971 break 9972 } 9973 v.reset(OpAMD64MOVQloadidx1) 9974 v.AuxInt = c + d 9975 v.Aux = sym 9976 v.AddArg(ptr) 9977 v.AddArg(idx) 9978 v.AddArg(mem) 9979 return true 9980 } 9981 // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 9982 // cond: is32Bit(c+d) 9983 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 9984 for { 9985 c := v.AuxInt 9986 sym := v.Aux 9987 _ = v.Args[2] 9988 idx := v.Args[0] 9989 v_1 := v.Args[1] 9990 if v_1.Op != OpAMD64ADDQconst { 9991 break 9992 } 9993 d := v_1.AuxInt 9994 ptr := v_1.Args[0] 9995 mem := v.Args[2] 9996 if !(is32Bit(c + d)) { 9997 break 9998 } 9999 v.reset(OpAMD64MOVQloadidx1) 10000 v.AuxInt = c + d 10001 v.Aux = sym 10002 v.AddArg(ptr) 10003 v.AddArg(idx) 10004 v.AddArg(mem) 10005 return true 10006 } 10007 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 10008 // cond: is32Bit(c+d) 10009 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 10010 for { 10011 c := v.AuxInt 10012 sym := v.Aux 10013 _ = v.Args[2] 10014 ptr := v.Args[0] 10015 v_1 := v.Args[1] 10016 if v_1.Op != OpAMD64ADDQconst { 10017 break 10018 } 10019 d := v_1.AuxInt 10020 idx := v_1.Args[0] 10021 mem := v.Args[2] 10022 if !(is32Bit(c + d)) { 10023 break 10024 } 10025 v.reset(OpAMD64MOVQloadidx1) 10026 v.AuxInt = c + d 10027 v.Aux = sym 10028 v.AddArg(ptr) 10029 v.AddArg(idx) 10030 v.AddArg(mem) 10031 return true 10032 } 10033 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 10034 // cond: is32Bit(c+d) 10035 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 10036 for { 10037 c := v.AuxInt 10038 sym := v.Aux 10039 _ = v.Args[2] 10040 v_0 := v.Args[0] 10041 if v_0.Op != OpAMD64ADDQconst { 10042 break 10043 } 10044 d := v_0.AuxInt 10045 idx := v_0.Args[0] 10046 ptr := v.Args[1] 10047 mem := v.Args[2] 10048 if !(is32Bit(c + d)) { 10049 break 10050 } 10051 v.reset(OpAMD64MOVQloadidx1) 10052 v.AuxInt = c + d 10053 v.Aux = sym 10054 v.AddArg(ptr) 10055 v.AddArg(idx) 10056 v.AddArg(mem) 10057 return true 10058 } 10059 return false 10060 } 10061 func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { 10062 // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) 10063 // cond: is32Bit(c+d) 10064 // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) 10065 for { 10066 c := v.AuxInt 10067 sym := v.Aux 10068 _ = v.Args[2] 10069 v_0 := v.Args[0] 10070 if v_0.Op != OpAMD64ADDQconst { 10071 break 10072 } 10073 d := 
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [i] {s} p x1:(MOVQload [j] {s2} p2 mem) mem2:(MOVQstore [i-8] {s} p x2:(MOVQload [j-8] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVOstore [i-8] {s} p (MOVOload [j-8] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVQload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVQstore {
			break
		}
		if mem2.AuxInt != i-8 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVQload {
			break
		}
		if x2.AuxInt != j-8 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = i - 8
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AuxInt = j - 8
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
	// result: (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ADDQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64ADDQconstmem)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool {
	// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
	// cond:
	// result: (MOVSDstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQf2i {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
	// cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)
	// result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c2 := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = ValAndOff(c2).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(8*c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(8 * c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool {
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
	// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool {
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// cond:
	// result: (MOVQstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
	// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool {
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVLi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool {
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool {
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
v0.AddArg(ptr) 12468 v0.AddArg(idx) 12469 v0.AddArg(mem) 12470 return true 12471 } 12472 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 12473 // cond: x.Uses == 1 && clobber(x) 12474 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 12475 for { 12476 x := v.Args[0] 12477 if x.Op != OpAMD64MOVWloadidx2 { 12478 break 12479 } 12480 off := x.AuxInt 12481 sym := x.Aux 12482 _ = x.Args[2] 12483 ptr := x.Args[0] 12484 idx := x.Args[1] 12485 mem := x.Args[2] 12486 if !(x.Uses == 1 && clobber(x)) { 12487 break 12488 } 12489 b = x.Block 12490 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 12491 v.reset(OpCopy) 12492 v.AddArg(v0) 12493 v0.AuxInt = off 12494 v0.Aux = sym 12495 v0.AddArg(ptr) 12496 v0.AddArg(idx) 12497 v0.AddArg(mem) 12498 return true 12499 } 12500 // match: (MOVWQZX (ANDLconst [c] x)) 12501 // cond: 12502 // result: (ANDLconst [c & 0xffff] x) 12503 for { 12504 v_0 := v.Args[0] 12505 if v_0.Op != OpAMD64ANDLconst { 12506 break 12507 } 12508 c := v_0.AuxInt 12509 x := v_0.Args[0] 12510 v.reset(OpAMD64ANDLconst) 12511 v.AuxInt = c & 0xffff 12512 v.AddArg(x) 12513 return true 12514 } 12515 // match: (MOVWQZX (MOVWQZX x)) 12516 // cond: 12517 // result: (MOVWQZX x) 12518 for { 12519 v_0 := v.Args[0] 12520 if v_0.Op != OpAMD64MOVWQZX { 12521 break 12522 } 12523 x := v_0.Args[0] 12524 v.reset(OpAMD64MOVWQZX) 12525 v.AddArg(x) 12526 return true 12527 } 12528 // match: (MOVWQZX (MOVBQZX x)) 12529 // cond: 12530 // result: (MOVBQZX x) 12531 for { 12532 v_0 := v.Args[0] 12533 if v_0.Op != OpAMD64MOVBQZX { 12534 break 12535 } 12536 x := v_0.Args[0] 12537 v.reset(OpAMD64MOVBQZX) 12538 v.AddArg(x) 12539 return true 12540 } 12541 return false 12542 } 12543 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 12544 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 12545 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12546 // result: (MOVWQZX x) 12547 for { 12548 off := v.AuxInt 12549 sym := v.Aux 12550 _ = v.Args[1] 12551 ptr := v.Args[0] 12552 v_1 := v.Args[1] 12553 if v_1.Op != OpAMD64MOVWstore { 12554 break 12555 } 12556 off2 := v_1.AuxInt 12557 sym2 := v_1.Aux 12558 _ = v_1.Args[2] 12559 ptr2 := v_1.Args[0] 12560 x := v_1.Args[1] 12561 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12562 break 12563 } 12564 v.reset(OpAMD64MOVWQZX) 12565 v.AddArg(x) 12566 return true 12567 } 12568 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 12569 // cond: is32Bit(off1+off2) 12570 // result: (MOVWload [off1+off2] {sym} ptr mem) 12571 for { 12572 off1 := v.AuxInt 12573 sym := v.Aux 12574 _ = v.Args[1] 12575 v_0 := v.Args[0] 12576 if v_0.Op != OpAMD64ADDQconst { 12577 break 12578 } 12579 off2 := v_0.AuxInt 12580 ptr := v_0.Args[0] 12581 mem := v.Args[1] 12582 if !(is32Bit(off1 + off2)) { 12583 break 12584 } 12585 v.reset(OpAMD64MOVWload) 12586 v.AuxInt = off1 + off2 12587 v.Aux = sym 12588 v.AddArg(ptr) 12589 v.AddArg(mem) 12590 return true 12591 } 12592 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12593 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12594 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12595 for { 12596 off1 := v.AuxInt 12597 sym1 := v.Aux 12598 _ = v.Args[1] 12599 v_0 := v.Args[0] 12600 if v_0.Op != OpAMD64LEAQ { 12601 break 12602 } 12603 off2 := v_0.AuxInt 12604 sym2 := v_0.Aux 12605 base := v_0.Args[0] 12606 mem := v.Args[1] 12607 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12608 break 12609 } 12610 v.reset(OpAMD64MOVWload) 12611 
v.AuxInt = off1 + off2 12612 v.Aux = mergeSym(sym1, sym2) 12613 v.AddArg(base) 12614 v.AddArg(mem) 12615 return true 12616 } 12617 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 12618 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12619 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 12620 for { 12621 off1 := v.AuxInt 12622 sym1 := v.Aux 12623 _ = v.Args[1] 12624 v_0 := v.Args[0] 12625 if v_0.Op != OpAMD64LEAQ1 { 12626 break 12627 } 12628 off2 := v_0.AuxInt 12629 sym2 := v_0.Aux 12630 _ = v_0.Args[1] 12631 ptr := v_0.Args[0] 12632 idx := v_0.Args[1] 12633 mem := v.Args[1] 12634 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12635 break 12636 } 12637 v.reset(OpAMD64MOVWloadidx1) 12638 v.AuxInt = off1 + off2 12639 v.Aux = mergeSym(sym1, sym2) 12640 v.AddArg(ptr) 12641 v.AddArg(idx) 12642 v.AddArg(mem) 12643 return true 12644 } 12645 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 12646 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12647 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 12648 for { 12649 off1 := v.AuxInt 12650 sym1 := v.Aux 12651 _ = v.Args[1] 12652 v_0 := v.Args[0] 12653 if v_0.Op != OpAMD64LEAQ2 { 12654 break 12655 } 12656 off2 := v_0.AuxInt 12657 sym2 := v_0.Aux 12658 _ = v_0.Args[1] 12659 ptr := v_0.Args[0] 12660 idx := v_0.Args[1] 12661 mem := v.Args[1] 12662 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12663 break 12664 } 12665 v.reset(OpAMD64MOVWloadidx2) 12666 v.AuxInt = off1 + off2 12667 v.Aux = mergeSym(sym1, sym2) 12668 v.AddArg(ptr) 12669 v.AddArg(idx) 12670 v.AddArg(mem) 12671 return true 12672 } 12673 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 12674 // cond: ptr.Op != OpSB 12675 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 12676 for { 12677 off := v.AuxInt 12678 sym := v.Aux 12679 _ = v.Args[1] 12680 v_0 := v.Args[0] 12681 if v_0.Op != OpAMD64ADDQ { 12682 break 12683 } 12684 _ = v_0.Args[1] 12685 ptr := v_0.Args[0] 12686 idx := v_0.Args[1] 12687 mem := v.Args[1] 12688 if !(ptr.Op != OpSB) { 12689 break 12690 } 12691 v.reset(OpAMD64MOVWloadidx1) 12692 v.AuxInt = off 12693 v.Aux = sym 12694 v.AddArg(ptr) 12695 v.AddArg(idx) 12696 v.AddArg(mem) 12697 return true 12698 } 12699 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 12700 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 12701 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12702 for { 12703 off1 := v.AuxInt 12704 sym1 := v.Aux 12705 _ = v.Args[1] 12706 v_0 := v.Args[0] 12707 if v_0.Op != OpAMD64LEAL { 12708 break 12709 } 12710 off2 := v_0.AuxInt 12711 sym2 := v_0.Aux 12712 base := v_0.Args[0] 12713 mem := v.Args[1] 12714 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 12715 break 12716 } 12717 v.reset(OpAMD64MOVWload) 12718 v.AuxInt = off1 + off2 12719 v.Aux = mergeSym(sym1, sym2) 12720 v.AddArg(base) 12721 v.AddArg(mem) 12722 return true 12723 } 12724 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 12725 // cond: is32Bit(off1+off2) 12726 // result: (MOVWload [off1+off2] {sym} ptr mem) 12727 for { 12728 off1 := v.AuxInt 12729 sym := v.Aux 12730 _ = v.Args[1] 12731 v_0 := v.Args[0] 12732 if v_0.Op != OpAMD64ADDLconst { 12733 break 12734 } 12735 off2 := v_0.AuxInt 12736 ptr := v_0.Args[0] 12737 mem := v.Args[1] 12738 if !(is32Bit(off1 + off2)) { 12739 break 12740 } 12741 v.reset(OpAMD64MOVWload) 12742 v.AuxInt = off1 + off2 12743 v.Aux = sym 12744 v.AddArg(ptr) 12745 v.AddArg(mem) 12746 return true 12747 } 12748 return 
false 12749 } 12750 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { 12751 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 12752 // cond: 12753 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 12754 for { 12755 c := v.AuxInt 12756 sym := v.Aux 12757 _ = v.Args[2] 12758 ptr := v.Args[0] 12759 v_1 := v.Args[1] 12760 if v_1.Op != OpAMD64SHLQconst { 12761 break 12762 } 12763 if v_1.AuxInt != 1 { 12764 break 12765 } 12766 idx := v_1.Args[0] 12767 mem := v.Args[2] 12768 v.reset(OpAMD64MOVWloadidx2) 12769 v.AuxInt = c 12770 v.Aux = sym 12771 v.AddArg(ptr) 12772 v.AddArg(idx) 12773 v.AddArg(mem) 12774 return true 12775 } 12776 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) 12777 // cond: 12778 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 12779 for { 12780 c := v.AuxInt 12781 sym := v.Aux 12782 _ = v.Args[2] 12783 v_0 := v.Args[0] 12784 if v_0.Op != OpAMD64SHLQconst { 12785 break 12786 } 12787 if v_0.AuxInt != 1 { 12788 break 12789 } 12790 idx := v_0.Args[0] 12791 ptr := v.Args[1] 12792 mem := v.Args[2] 12793 v.reset(OpAMD64MOVWloadidx2) 12794 v.AuxInt = c 12795 v.Aux = sym 12796 v.AddArg(ptr) 12797 v.AddArg(idx) 12798 v.AddArg(mem) 12799 return true 12800 } 12801 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 12802 // cond: is32Bit(c+d) 12803 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12804 for { 12805 c := v.AuxInt 12806 sym := v.Aux 12807 _ = v.Args[2] 12808 v_0 := v.Args[0] 12809 if v_0.Op != OpAMD64ADDQconst { 12810 break 12811 } 12812 d := v_0.AuxInt 12813 ptr := v_0.Args[0] 12814 idx := v.Args[1] 12815 mem := v.Args[2] 12816 if !(is32Bit(c + d)) { 12817 break 12818 } 12819 v.reset(OpAMD64MOVWloadidx1) 12820 v.AuxInt = c + d 12821 v.Aux = sym 12822 v.AddArg(ptr) 12823 v.AddArg(idx) 12824 v.AddArg(mem) 12825 return true 12826 } 12827 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 12828 // cond: is32Bit(c+d) 12829 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12830 for { 12831 c := v.AuxInt 12832 sym := v.Aux 12833 _ = v.Args[2] 12834 idx := v.Args[0] 12835 v_1 := v.Args[1] 12836 if v_1.Op != OpAMD64ADDQconst { 12837 break 12838 } 12839 d := v_1.AuxInt 12840 ptr := v_1.Args[0] 12841 mem := v.Args[2] 12842 if !(is32Bit(c + d)) { 12843 break 12844 } 12845 v.reset(OpAMD64MOVWloadidx1) 12846 v.AuxInt = c + d 12847 v.Aux = sym 12848 v.AddArg(ptr) 12849 v.AddArg(idx) 12850 v.AddArg(mem) 12851 return true 12852 } 12853 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 12854 // cond: is32Bit(c+d) 12855 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12856 for { 12857 c := v.AuxInt 12858 sym := v.Aux 12859 _ = v.Args[2] 12860 ptr := v.Args[0] 12861 v_1 := v.Args[1] 12862 if v_1.Op != OpAMD64ADDQconst { 12863 break 12864 } 12865 d := v_1.AuxInt 12866 idx := v_1.Args[0] 12867 mem := v.Args[2] 12868 if !(is32Bit(c + d)) { 12869 break 12870 } 12871 v.reset(OpAMD64MOVWloadidx1) 12872 v.AuxInt = c + d 12873 v.Aux = sym 12874 v.AddArg(ptr) 12875 v.AddArg(idx) 12876 v.AddArg(mem) 12877 return true 12878 } 12879 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 12880 // cond: is32Bit(c+d) 12881 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12882 for { 12883 c := v.AuxInt 12884 sym := v.Aux 12885 _ = v.Args[2] 12886 v_0 := v.Args[0] 12887 if v_0.Op != OpAMD64ADDQconst { 12888 break 12889 } 12890 d := v_0.AuxInt 12891 idx := v_0.Args[0] 12892 ptr := v.Args[1] 12893 mem := v.Args[2] 12894 if !(is32Bit(c + d)) { 12895 break 12896 } 12897 v.reset(OpAMD64MOVWloadidx1) 12898 v.AuxInt = c + d 12899 
v.Aux = sym 12900 v.AddArg(ptr) 12901 v.AddArg(idx) 12902 v.AddArg(mem) 12903 return true 12904 } 12905 return false 12906 } 12907 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { 12908 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 12909 // cond: is32Bit(c+d) 12910 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 12911 for { 12912 c := v.AuxInt 12913 sym := v.Aux 12914 _ = v.Args[2] 12915 v_0 := v.Args[0] 12916 if v_0.Op != OpAMD64ADDQconst { 12917 break 12918 } 12919 d := v_0.AuxInt 12920 ptr := v_0.Args[0] 12921 idx := v.Args[1] 12922 mem := v.Args[2] 12923 if !(is32Bit(c + d)) { 12924 break 12925 } 12926 v.reset(OpAMD64MOVWloadidx2) 12927 v.AuxInt = c + d 12928 v.Aux = sym 12929 v.AddArg(ptr) 12930 v.AddArg(idx) 12931 v.AddArg(mem) 12932 return true 12933 } 12934 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 12935 // cond: is32Bit(c+2*d) 12936 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 12937 for { 12938 c := v.AuxInt 12939 sym := v.Aux 12940 _ = v.Args[2] 12941 ptr := v.Args[0] 12942 v_1 := v.Args[1] 12943 if v_1.Op != OpAMD64ADDQconst { 12944 break 12945 } 12946 d := v_1.AuxInt 12947 idx := v_1.Args[0] 12948 mem := v.Args[2] 12949 if !(is32Bit(c + 2*d)) { 12950 break 12951 } 12952 v.reset(OpAMD64MOVWloadidx2) 12953 v.AuxInt = c + 2*d 12954 v.Aux = sym 12955 v.AddArg(ptr) 12956 v.AddArg(idx) 12957 v.AddArg(mem) 12958 return true 12959 } 12960 return false 12961 } 12962 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { 12963 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 12964 // cond: 12965 // result: (MOVWstore [off] {sym} ptr x mem) 12966 for { 12967 off := v.AuxInt 12968 sym := v.Aux 12969 _ = v.Args[2] 12970 ptr := v.Args[0] 12971 v_1 := v.Args[1] 12972 if v_1.Op != OpAMD64MOVWQSX { 12973 break 12974 } 12975 x := v_1.Args[0] 12976 mem := v.Args[2] 12977 v.reset(OpAMD64MOVWstore) 12978 v.AuxInt = off 12979 v.Aux = sym 12980 v.AddArg(ptr) 12981 v.AddArg(x) 12982 v.AddArg(mem) 12983 return true 12984 } 12985 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 12986 // cond: 12987 // result: (MOVWstore [off] {sym} ptr x mem) 12988 for { 12989 off := v.AuxInt 12990 sym := v.Aux 12991 _ = v.Args[2] 12992 ptr := v.Args[0] 12993 v_1 := v.Args[1] 12994 if v_1.Op != OpAMD64MOVWQZX { 12995 break 12996 } 12997 x := v_1.Args[0] 12998 mem := v.Args[2] 12999 v.reset(OpAMD64MOVWstore) 13000 v.AuxInt = off 13001 v.Aux = sym 13002 v.AddArg(ptr) 13003 v.AddArg(x) 13004 v.AddArg(mem) 13005 return true 13006 } 13007 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 13008 // cond: is32Bit(off1+off2) 13009 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 13010 for { 13011 off1 := v.AuxInt 13012 sym := v.Aux 13013 _ = v.Args[2] 13014 v_0 := v.Args[0] 13015 if v_0.Op != OpAMD64ADDQconst { 13016 break 13017 } 13018 off2 := v_0.AuxInt 13019 ptr := v_0.Args[0] 13020 val := v.Args[1] 13021 mem := v.Args[2] 13022 if !(is32Bit(off1 + off2)) { 13023 break 13024 } 13025 v.reset(OpAMD64MOVWstore) 13026 v.AuxInt = off1 + off2 13027 v.Aux = sym 13028 v.AddArg(ptr) 13029 v.AddArg(val) 13030 v.AddArg(mem) 13031 return true 13032 } 13033 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 13034 // cond: validOff(off) 13035 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 13036 for { 13037 off := v.AuxInt 13038 sym := v.Aux 13039 _ = v.Args[2] 13040 ptr := v.Args[0] 13041 v_1 := v.Args[1] 13042 if v_1.Op != OpAMD64MOVLconst { 13043 break 13044 } 13045 c := v_1.AuxInt 13046 mem := v.Args[2] 
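// A store of a constant becomes MOVWstoreconst: the constant is truncated to
// 16 bits via int64(int16(c)) and packed together with the offset into a
// single AuxInt by makeValAndOff (value in the high 32 bits, offset in the
// low 32; see ValAndOff in this package). validOff below checks that the
// offset fits that packed encoding.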
13047 if !(validOff(off)) { 13048 break 13049 } 13050 v.reset(OpAMD64MOVWstoreconst) 13051 v.AuxInt = makeValAndOff(int64(int16(c)), off) 13052 v.Aux = sym 13053 v.AddArg(ptr) 13054 v.AddArg(mem) 13055 return true 13056 } 13057 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 13058 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13059 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 13060 for { 13061 off1 := v.AuxInt 13062 sym1 := v.Aux 13063 _ = v.Args[2] 13064 v_0 := v.Args[0] 13065 if v_0.Op != OpAMD64LEAQ { 13066 break 13067 } 13068 off2 := v_0.AuxInt 13069 sym2 := v_0.Aux 13070 base := v_0.Args[0] 13071 val := v.Args[1] 13072 mem := v.Args[2] 13073 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13074 break 13075 } 13076 v.reset(OpAMD64MOVWstore) 13077 v.AuxInt = off1 + off2 13078 v.Aux = mergeSym(sym1, sym2) 13079 v.AddArg(base) 13080 v.AddArg(val) 13081 v.AddArg(mem) 13082 return true 13083 } 13084 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 13085 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13086 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 13087 for { 13088 off1 := v.AuxInt 13089 sym1 := v.Aux 13090 _ = v.Args[2] 13091 v_0 := v.Args[0] 13092 if v_0.Op != OpAMD64LEAQ1 { 13093 break 13094 } 13095 off2 := v_0.AuxInt 13096 sym2 := v_0.Aux 13097 _ = v_0.Args[1] 13098 ptr := v_0.Args[0] 13099 idx := v_0.Args[1] 13100 val := v.Args[1] 13101 mem := v.Args[2] 13102 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13103 break 13104 } 13105 v.reset(OpAMD64MOVWstoreidx1) 13106 v.AuxInt = off1 + off2 13107 v.Aux = mergeSym(sym1, sym2) 13108 v.AddArg(ptr) 13109 v.AddArg(idx) 13110 v.AddArg(val) 13111 v.AddArg(mem) 13112 return true 13113 } 13114 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 13115 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13116 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 13117 for { 13118 off1 := v.AuxInt 13119 sym1 := v.Aux 13120 _ = v.Args[2] 13121 v_0 := v.Args[0] 13122 if v_0.Op != OpAMD64LEAQ2 { 13123 break 13124 } 13125 off2 := v_0.AuxInt 13126 sym2 := v_0.Aux 13127 _ = v_0.Args[1] 13128 ptr := v_0.Args[0] 13129 idx := v_0.Args[1] 13130 val := v.Args[1] 13131 mem := v.Args[2] 13132 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13133 break 13134 } 13135 v.reset(OpAMD64MOVWstoreidx2) 13136 v.AuxInt = off1 + off2 13137 v.Aux = mergeSym(sym1, sym2) 13138 v.AddArg(ptr) 13139 v.AddArg(idx) 13140 v.AddArg(val) 13141 v.AddArg(mem) 13142 return true 13143 } 13144 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 13145 // cond: ptr.Op != OpSB 13146 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 13147 for { 13148 off := v.AuxInt 13149 sym := v.Aux 13150 _ = v.Args[2] 13151 v_0 := v.Args[0] 13152 if v_0.Op != OpAMD64ADDQ { 13153 break 13154 } 13155 _ = v_0.Args[1] 13156 ptr := v_0.Args[0] 13157 idx := v_0.Args[1] 13158 val := v.Args[1] 13159 mem := v.Args[2] 13160 if !(ptr.Op != OpSB) { 13161 break 13162 } 13163 v.reset(OpAMD64MOVWstoreidx1) 13164 v.AuxInt = off 13165 v.Aux = sym 13166 v.AddArg(ptr) 13167 v.AddArg(idx) 13168 v.AddArg(val) 13169 v.AddArg(mem) 13170 return true 13171 } 13172 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 13173 // cond: x.Uses == 1 && clobber(x) 13174 // result: (MOVLstore [i-2] {s} p w mem) 13175 for { 13176 i := v.AuxInt 13177 s := v.Aux 13178 _ = v.Args[2] 13179 p := v.Args[0] 13180 v_1 := 
v.Args[1] 13181 if v_1.Op != OpAMD64SHRQconst { 13182 break 13183 } 13184 if v_1.AuxInt != 16 { 13185 break 13186 } 13187 w := v_1.Args[0] 13188 x := v.Args[2] 13189 if x.Op != OpAMD64MOVWstore { 13190 break 13191 } 13192 if x.AuxInt != i-2 { 13193 break 13194 } 13195 if x.Aux != s { 13196 break 13197 } 13198 _ = x.Args[2] 13199 if p != x.Args[0] { 13200 break 13201 } 13202 if w != x.Args[1] { 13203 break 13204 } 13205 mem := x.Args[2] 13206 if !(x.Uses == 1 && clobber(x)) { 13207 break 13208 } 13209 v.reset(OpAMD64MOVLstore) 13210 v.AuxInt = i - 2 13211 v.Aux = s 13212 v.AddArg(p) 13213 v.AddArg(w) 13214 v.AddArg(mem) 13215 return true 13216 } 13217 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 13218 // cond: x.Uses == 1 && clobber(x) 13219 // result: (MOVLstore [i-2] {s} p w0 mem) 13220 for { 13221 i := v.AuxInt 13222 s := v.Aux 13223 _ = v.Args[2] 13224 p := v.Args[0] 13225 v_1 := v.Args[1] 13226 if v_1.Op != OpAMD64SHRQconst { 13227 break 13228 } 13229 j := v_1.AuxInt 13230 w := v_1.Args[0] 13231 x := v.Args[2] 13232 if x.Op != OpAMD64MOVWstore { 13233 break 13234 } 13235 if x.AuxInt != i-2 { 13236 break 13237 } 13238 if x.Aux != s { 13239 break 13240 } 13241 _ = x.Args[2] 13242 if p != x.Args[0] { 13243 break 13244 } 13245 w0 := x.Args[1] 13246 if w0.Op != OpAMD64SHRQconst { 13247 break 13248 } 13249 if w0.AuxInt != j-16 { 13250 break 13251 } 13252 if w != w0.Args[0] { 13253 break 13254 } 13255 mem := x.Args[2] 13256 if !(x.Uses == 1 && clobber(x)) { 13257 break 13258 } 13259 v.reset(OpAMD64MOVLstore) 13260 v.AuxInt = i - 2 13261 v.Aux = s 13262 v.AddArg(p) 13263 v.AddArg(w0) 13264 v.AddArg(mem) 13265 return true 13266 } 13267 return false 13268 } 13269 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { 13270 b := v.Block 13271 _ = b 13272 typ := &b.Func.Config.Types 13273 _ = typ 13274 // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem)) 13275 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 13276 // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) 13277 for { 13278 i := v.AuxInt 13279 s := v.Aux 13280 _ = v.Args[2] 13281 p := v.Args[0] 13282 x1 := v.Args[1] 13283 if x1.Op != OpAMD64MOVWload { 13284 break 13285 } 13286 j := x1.AuxInt 13287 s2 := x1.Aux 13288 _ = x1.Args[1] 13289 p2 := x1.Args[0] 13290 mem := x1.Args[1] 13291 mem2 := v.Args[2] 13292 if mem2.Op != OpAMD64MOVWstore { 13293 break 13294 } 13295 if mem2.AuxInt != i-2 { 13296 break 13297 } 13298 if mem2.Aux != s { 13299 break 13300 } 13301 _ = mem2.Args[2] 13302 if p != mem2.Args[0] { 13303 break 13304 } 13305 x2 := mem2.Args[1] 13306 if x2.Op != OpAMD64MOVWload { 13307 break 13308 } 13309 if x2.AuxInt != j-2 { 13310 break 13311 } 13312 if x2.Aux != s2 { 13313 break 13314 } 13315 _ = x2.Args[1] 13316 if p2 != x2.Args[0] { 13317 break 13318 } 13319 if mem != x2.Args[1] { 13320 break 13321 } 13322 if mem != mem2.Args[2] { 13323 break 13324 } 13325 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 13326 break 13327 } 13328 v.reset(OpAMD64MOVLstore) 13329 v.AuxInt = i - 2 13330 v.Aux = s 13331 v.AddArg(p) 13332 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 13333 v0.AuxInt = j - 2 13334 v0.Aux = s2 13335 v0.AddArg(p2) 13336 v0.AddArg(mem) 13337 v.AddArg(v0) 13338 v.AddArg(mem) 13339 return true 13340 } 13341 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} 
base) val mem) 13342 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 13343 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 13344 for { 13345 off1 := v.AuxInt 13346 sym1 := v.Aux 13347 _ = v.Args[2] 13348 v_0 := v.Args[0] 13349 if v_0.Op != OpAMD64LEAL { 13350 break 13351 } 13352 off2 := v_0.AuxInt 13353 sym2 := v_0.Aux 13354 base := v_0.Args[0] 13355 val := v.Args[1] 13356 mem := v.Args[2] 13357 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 13358 break 13359 } 13360 v.reset(OpAMD64MOVWstore) 13361 v.AuxInt = off1 + off2 13362 v.Aux = mergeSym(sym1, sym2) 13363 v.AddArg(base) 13364 v.AddArg(val) 13365 v.AddArg(mem) 13366 return true 13367 } 13368 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 13369 // cond: is32Bit(off1+off2) 13370 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 13371 for { 13372 off1 := v.AuxInt 13373 sym := v.Aux 13374 _ = v.Args[2] 13375 v_0 := v.Args[0] 13376 if v_0.Op != OpAMD64ADDLconst { 13377 break 13378 } 13379 off2 := v_0.AuxInt 13380 ptr := v_0.Args[0] 13381 val := v.Args[1] 13382 mem := v.Args[2] 13383 if !(is32Bit(off1 + off2)) { 13384 break 13385 } 13386 v.reset(OpAMD64MOVWstore) 13387 v.AuxInt = off1 + off2 13388 v.Aux = sym 13389 v.AddArg(ptr) 13390 v.AddArg(val) 13391 v.AddArg(mem) 13392 return true 13393 } 13394 return false 13395 } 13396 func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { 13397 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 13398 // cond: ValAndOff(sc).canAdd(off) 13399 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 13400 for { 13401 sc := v.AuxInt 13402 s := v.Aux 13403 _ = v.Args[1] 13404 v_0 := v.Args[0] 13405 if v_0.Op != OpAMD64ADDQconst { 13406 break 13407 } 13408 off := v_0.AuxInt 13409 ptr := v_0.Args[0] 13410 mem := v.Args[1] 13411 if !(ValAndOff(sc).canAdd(off)) { 13412 break 13413 } 13414 v.reset(OpAMD64MOVWstoreconst) 13415 v.AuxInt = ValAndOff(sc).add(off) 13416 v.Aux = s 13417 v.AddArg(ptr) 13418 v.AddArg(mem) 13419 return true 13420 } 13421 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 13422 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13423 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13424 for { 13425 sc := v.AuxInt 13426 sym1 := v.Aux 13427 _ = v.Args[1] 13428 v_0 := v.Args[0] 13429 if v_0.Op != OpAMD64LEAQ { 13430 break 13431 } 13432 off := v_0.AuxInt 13433 sym2 := v_0.Aux 13434 ptr := v_0.Args[0] 13435 mem := v.Args[1] 13436 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13437 break 13438 } 13439 v.reset(OpAMD64MOVWstoreconst) 13440 v.AuxInt = ValAndOff(sc).add(off) 13441 v.Aux = mergeSym(sym1, sym2) 13442 v.AddArg(ptr) 13443 v.AddArg(mem) 13444 return true 13445 } 13446 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 13447 // cond: canMergeSym(sym1, sym2) 13448 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13449 for { 13450 x := v.AuxInt 13451 sym1 := v.Aux 13452 _ = v.Args[1] 13453 v_0 := v.Args[0] 13454 if v_0.Op != OpAMD64LEAQ1 { 13455 break 13456 } 13457 off := v_0.AuxInt 13458 sym2 := v_0.Aux 13459 _ = v_0.Args[1] 13460 ptr := v_0.Args[0] 13461 idx := v_0.Args[1] 13462 mem := v.Args[1] 13463 if !(canMergeSym(sym1, sym2)) { 13464 break 13465 } 13466 v.reset(OpAMD64MOVWstoreconstidx1) 13467 v.AuxInt = ValAndOff(x).add(off) 13468 v.Aux = mergeSym(sym1, sym2) 13469 v.AddArg(ptr) 13470 v.AddArg(idx) 13471 v.AddArg(mem) 13472 return true 13473 } 13474 // 
match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 13475 // cond: canMergeSym(sym1, sym2) 13476 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13477 for { 13478 x := v.AuxInt 13479 sym1 := v.Aux 13480 _ = v.Args[1] 13481 v_0 := v.Args[0] 13482 if v_0.Op != OpAMD64LEAQ2 { 13483 break 13484 } 13485 off := v_0.AuxInt 13486 sym2 := v_0.Aux 13487 _ = v_0.Args[1] 13488 ptr := v_0.Args[0] 13489 idx := v_0.Args[1] 13490 mem := v.Args[1] 13491 if !(canMergeSym(sym1, sym2)) { 13492 break 13493 } 13494 v.reset(OpAMD64MOVWstoreconstidx2) 13495 v.AuxInt = ValAndOff(x).add(off) 13496 v.Aux = mergeSym(sym1, sym2) 13497 v.AddArg(ptr) 13498 v.AddArg(idx) 13499 v.AddArg(mem) 13500 return true 13501 } 13502 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 13503 // cond: 13504 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 13505 for { 13506 x := v.AuxInt 13507 sym := v.Aux 13508 _ = v.Args[1] 13509 v_0 := v.Args[0] 13510 if v_0.Op != OpAMD64ADDQ { 13511 break 13512 } 13513 _ = v_0.Args[1] 13514 ptr := v_0.Args[0] 13515 idx := v_0.Args[1] 13516 mem := v.Args[1] 13517 v.reset(OpAMD64MOVWstoreconstidx1) 13518 v.AuxInt = x 13519 v.Aux = sym 13520 v.AddArg(ptr) 13521 v.AddArg(idx) 13522 v.AddArg(mem) 13523 return true 13524 } 13525 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 13526 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13527 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 13528 for { 13529 c := v.AuxInt 13530 s := v.Aux 13531 _ = v.Args[1] 13532 p := v.Args[0] 13533 x := v.Args[1] 13534 if x.Op != OpAMD64MOVWstoreconst { 13535 break 13536 } 13537 a := x.AuxInt 13538 if x.Aux != s { 13539 break 13540 } 13541 _ = x.Args[1] 13542 if p != x.Args[0] { 13543 break 13544 } 13545 mem := x.Args[1] 13546 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13547 break 13548 } 13549 v.reset(OpAMD64MOVLstoreconst) 13550 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13551 v.Aux = s 13552 v.AddArg(p) 13553 v.AddArg(mem) 13554 return true 13555 } 13556 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 13557 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13558 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13559 for { 13560 sc := v.AuxInt 13561 sym1 := v.Aux 13562 _ = v.Args[1] 13563 v_0 := v.Args[0] 13564 if v_0.Op != OpAMD64LEAL { 13565 break 13566 } 13567 off := v_0.AuxInt 13568 sym2 := v_0.Aux 13569 ptr := v_0.Args[0] 13570 mem := v.Args[1] 13571 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13572 break 13573 } 13574 v.reset(OpAMD64MOVWstoreconst) 13575 v.AuxInt = ValAndOff(sc).add(off) 13576 v.Aux = mergeSym(sym1, sym2) 13577 v.AddArg(ptr) 13578 v.AddArg(mem) 13579 return true 13580 } 13581 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 13582 // cond: ValAndOff(sc).canAdd(off) 13583 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 13584 for { 13585 sc := v.AuxInt 13586 s := v.Aux 13587 _ = v.Args[1] 13588 v_0 := v.Args[0] 13589 if v_0.Op != OpAMD64ADDLconst { 13590 break 13591 } 13592 off := v_0.AuxInt 13593 ptr := v_0.Args[0] 13594 mem := v.Args[1] 13595 if !(ValAndOff(sc).canAdd(off)) { 13596 break 13597 } 13598 v.reset(OpAMD64MOVWstoreconst) 13599 v.AuxInt = ValAndOff(sc).add(off) 13600 v.Aux = s 13601 v.AddArg(ptr) 
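// Only the addressing is rewritten in this fold; the memory argument is
// carried over unchanged.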
13602 v.AddArg(mem) 13603 return true 13604 } 13605 return false 13606 } 13607 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { 13608 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 13609 // cond: 13610 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 13611 for { 13612 c := v.AuxInt 13613 sym := v.Aux 13614 _ = v.Args[2] 13615 ptr := v.Args[0] 13616 v_1 := v.Args[1] 13617 if v_1.Op != OpAMD64SHLQconst { 13618 break 13619 } 13620 if v_1.AuxInt != 1 { 13621 break 13622 } 13623 idx := v_1.Args[0] 13624 mem := v.Args[2] 13625 v.reset(OpAMD64MOVWstoreconstidx2) 13626 v.AuxInt = c 13627 v.Aux = sym 13628 v.AddArg(ptr) 13629 v.AddArg(idx) 13630 v.AddArg(mem) 13631 return true 13632 } 13633 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 13634 // cond: ValAndOff(x).canAdd(c) 13635 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13636 for { 13637 x := v.AuxInt 13638 sym := v.Aux 13639 _ = v.Args[2] 13640 v_0 := v.Args[0] 13641 if v_0.Op != OpAMD64ADDQconst { 13642 break 13643 } 13644 c := v_0.AuxInt 13645 ptr := v_0.Args[0] 13646 idx := v.Args[1] 13647 mem := v.Args[2] 13648 if !(ValAndOff(x).canAdd(c)) { 13649 break 13650 } 13651 v.reset(OpAMD64MOVWstoreconstidx1) 13652 v.AuxInt = ValAndOff(x).add(c) 13653 v.Aux = sym 13654 v.AddArg(ptr) 13655 v.AddArg(idx) 13656 v.AddArg(mem) 13657 return true 13658 } 13659 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 13660 // cond: ValAndOff(x).canAdd(c) 13661 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13662 for { 13663 x := v.AuxInt 13664 sym := v.Aux 13665 _ = v.Args[2] 13666 ptr := v.Args[0] 13667 v_1 := v.Args[1] 13668 if v_1.Op != OpAMD64ADDQconst { 13669 break 13670 } 13671 c := v_1.AuxInt 13672 idx := v_1.Args[0] 13673 mem := v.Args[2] 13674 if !(ValAndOff(x).canAdd(c)) { 13675 break 13676 } 13677 v.reset(OpAMD64MOVWstoreconstidx1) 13678 v.AuxInt = ValAndOff(x).add(c) 13679 v.Aux = sym 13680 v.AddArg(ptr) 13681 v.AddArg(idx) 13682 v.AddArg(mem) 13683 return true 13684 } 13685 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 13686 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13687 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 13688 for { 13689 c := v.AuxInt 13690 s := v.Aux 13691 _ = v.Args[2] 13692 p := v.Args[0] 13693 i := v.Args[1] 13694 x := v.Args[2] 13695 if x.Op != OpAMD64MOVWstoreconstidx1 { 13696 break 13697 } 13698 a := x.AuxInt 13699 if x.Aux != s { 13700 break 13701 } 13702 _ = x.Args[2] 13703 if p != x.Args[0] { 13704 break 13705 } 13706 if i != x.Args[1] { 13707 break 13708 } 13709 mem := x.Args[2] 13710 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13711 break 13712 } 13713 v.reset(OpAMD64MOVLstoreconstidx1) 13714 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13715 v.Aux = s 13716 v.AddArg(p) 13717 v.AddArg(i) 13718 v.AddArg(mem) 13719 return true 13720 } 13721 return false 13722 } 13723 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { 13724 b := v.Block 13725 _ = b 13726 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 13727 // cond: ValAndOff(x).canAdd(c) 13728 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13729 for { 13730 x := v.AuxInt 13731 sym := v.Aux 13732 _ = v.Args[2] 13733 v_0 := v.Args[0] 
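// As in the non-indexed MOVWstoreconst case above, a constant adjustment of
// the pointer operand is folded into the ValAndOff offset; canAdd guards
// against overflowing the packed 32-bit offset field.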
13734 if v_0.Op != OpAMD64ADDQconst { 13735 break 13736 } 13737 c := v_0.AuxInt 13738 ptr := v_0.Args[0] 13739 idx := v.Args[1] 13740 mem := v.Args[2] 13741 if !(ValAndOff(x).canAdd(c)) { 13742 break 13743 } 13744 v.reset(OpAMD64MOVWstoreconstidx2) 13745 v.AuxInt = ValAndOff(x).add(c) 13746 v.Aux = sym 13747 v.AddArg(ptr) 13748 v.AddArg(idx) 13749 v.AddArg(mem) 13750 return true 13751 } 13752 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 13753 // cond: ValAndOff(x).canAdd(2*c) 13754 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 13755 for { 13756 x := v.AuxInt 13757 sym := v.Aux 13758 _ = v.Args[2] 13759 ptr := v.Args[0] 13760 v_1 := v.Args[1] 13761 if v_1.Op != OpAMD64ADDQconst { 13762 break 13763 } 13764 c := v_1.AuxInt 13765 idx := v_1.Args[0] 13766 mem := v.Args[2] 13767 if !(ValAndOff(x).canAdd(2 * c)) { 13768 break 13769 } 13770 v.reset(OpAMD64MOVWstoreconstidx2) 13771 v.AuxInt = ValAndOff(x).add(2 * c) 13772 v.Aux = sym 13773 v.AddArg(ptr) 13774 v.AddArg(idx) 13775 v.AddArg(mem) 13776 return true 13777 } 13778 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 13779 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13780 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 13781 for { 13782 c := v.AuxInt 13783 s := v.Aux 13784 _ = v.Args[2] 13785 p := v.Args[0] 13786 i := v.Args[1] 13787 x := v.Args[2] 13788 if x.Op != OpAMD64MOVWstoreconstidx2 { 13789 break 13790 } 13791 a := x.AuxInt 13792 if x.Aux != s { 13793 break 13794 } 13795 _ = x.Args[2] 13796 if p != x.Args[0] { 13797 break 13798 } 13799 if i != x.Args[1] { 13800 break 13801 } 13802 mem := x.Args[2] 13803 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13804 break 13805 } 13806 v.reset(OpAMD64MOVLstoreconstidx1) 13807 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13808 v.Aux = s 13809 v.AddArg(p) 13810 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 13811 v0.AuxInt = 1 13812 v0.AddArg(i) 13813 v.AddArg(v0) 13814 v.AddArg(mem) 13815 return true 13816 } 13817 return false 13818 } 13819 func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { 13820 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 13821 // cond: 13822 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 13823 for { 13824 c := v.AuxInt 13825 sym := v.Aux 13826 _ = v.Args[3] 13827 ptr := v.Args[0] 13828 v_1 := v.Args[1] 13829 if v_1.Op != OpAMD64SHLQconst { 13830 break 13831 } 13832 if v_1.AuxInt != 1 { 13833 break 13834 } 13835 idx := v_1.Args[0] 13836 val := v.Args[2] 13837 mem := v.Args[3] 13838 v.reset(OpAMD64MOVWstoreidx2) 13839 v.AuxInt = c 13840 v.Aux = sym 13841 v.AddArg(ptr) 13842 v.AddArg(idx) 13843 v.AddArg(val) 13844 v.AddArg(mem) 13845 return true 13846 } 13847 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 13848 // cond: is32Bit(c+d) 13849 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 13850 for { 13851 c := v.AuxInt 13852 sym := v.Aux 13853 _ = v.Args[3] 13854 v_0 := v.Args[0] 13855 if v_0.Op != OpAMD64ADDQconst { 13856 break 13857 } 13858 d := v_0.AuxInt 13859 ptr := v_0.Args[0] 13860 idx := v.Args[1] 13861 val := v.Args[2] 13862 mem := v.Args[3] 13863 if !(is32Bit(c + d)) { 13864 break 13865 } 13866 v.reset(OpAMD64MOVWstoreidx1) 13867 v.AuxInt = c + d 13868 v.Aux = sym 13869 v.AddArg(ptr) 13870 
v.AddArg(idx) 13871 v.AddArg(val) 13872 v.AddArg(mem) 13873 return true 13874 } 13875 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 13876 // cond: is32Bit(c+d) 13877 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 13878 for { 13879 c := v.AuxInt 13880 sym := v.Aux 13881 _ = v.Args[3] 13882 ptr := v.Args[0] 13883 v_1 := v.Args[1] 13884 if v_1.Op != OpAMD64ADDQconst { 13885 break 13886 } 13887 d := v_1.AuxInt 13888 idx := v_1.Args[0] 13889 val := v.Args[2] 13890 mem := v.Args[3] 13891 if !(is32Bit(c + d)) { 13892 break 13893 } 13894 v.reset(OpAMD64MOVWstoreidx1) 13895 v.AuxInt = c + d 13896 v.Aux = sym 13897 v.AddArg(ptr) 13898 v.AddArg(idx) 13899 v.AddArg(val) 13900 v.AddArg(mem) 13901 return true 13902 } 13903 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 13904 // cond: x.Uses == 1 && clobber(x) 13905 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 13906 for { 13907 i := v.AuxInt 13908 s := v.Aux 13909 _ = v.Args[3] 13910 p := v.Args[0] 13911 idx := v.Args[1] 13912 v_2 := v.Args[2] 13913 if v_2.Op != OpAMD64SHRQconst { 13914 break 13915 } 13916 if v_2.AuxInt != 16 { 13917 break 13918 } 13919 w := v_2.Args[0] 13920 x := v.Args[3] 13921 if x.Op != OpAMD64MOVWstoreidx1 { 13922 break 13923 } 13924 if x.AuxInt != i-2 { 13925 break 13926 } 13927 if x.Aux != s { 13928 break 13929 } 13930 _ = x.Args[3] 13931 if p != x.Args[0] { 13932 break 13933 } 13934 if idx != x.Args[1] { 13935 break 13936 } 13937 if w != x.Args[2] { 13938 break 13939 } 13940 mem := x.Args[3] 13941 if !(x.Uses == 1 && clobber(x)) { 13942 break 13943 } 13944 v.reset(OpAMD64MOVLstoreidx1) 13945 v.AuxInt = i - 2 13946 v.Aux = s 13947 v.AddArg(p) 13948 v.AddArg(idx) 13949 v.AddArg(w) 13950 v.AddArg(mem) 13951 return true 13952 } 13953 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 13954 // cond: x.Uses == 1 && clobber(x) 13955 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 13956 for { 13957 i := v.AuxInt 13958 s := v.Aux 13959 _ = v.Args[3] 13960 p := v.Args[0] 13961 idx := v.Args[1] 13962 v_2 := v.Args[2] 13963 if v_2.Op != OpAMD64SHRQconst { 13964 break 13965 } 13966 j := v_2.AuxInt 13967 w := v_2.Args[0] 13968 x := v.Args[3] 13969 if x.Op != OpAMD64MOVWstoreidx1 { 13970 break 13971 } 13972 if x.AuxInt != i-2 { 13973 break 13974 } 13975 if x.Aux != s { 13976 break 13977 } 13978 _ = x.Args[3] 13979 if p != x.Args[0] { 13980 break 13981 } 13982 if idx != x.Args[1] { 13983 break 13984 } 13985 w0 := x.Args[2] 13986 if w0.Op != OpAMD64SHRQconst { 13987 break 13988 } 13989 if w0.AuxInt != j-16 { 13990 break 13991 } 13992 if w != w0.Args[0] { 13993 break 13994 } 13995 mem := x.Args[3] 13996 if !(x.Uses == 1 && clobber(x)) { 13997 break 13998 } 13999 v.reset(OpAMD64MOVLstoreidx1) 14000 v.AuxInt = i - 2 14001 v.Aux = s 14002 v.AddArg(p) 14003 v.AddArg(idx) 14004 v.AddArg(w0) 14005 v.AddArg(mem) 14006 return true 14007 } 14008 return false 14009 } 14010 func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { 14011 b := v.Block 14012 _ = b 14013 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 14014 // cond: is32Bit(c+d) 14015 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 14016 for { 14017 c := v.AuxInt 14018 sym := v.Aux 14019 _ = v.Args[3] 14020 v_0 := v.Args[0] 14021 if v_0.Op != OpAMD64ADDQconst { 14022 break 14023 } 14024 d := v_0.AuxInt 14025 ptr := v_0.Args[0] 14026 idx := v.Args[1] 14027 val := v.Args[2] 14028 mem := v.Args[3] 14029 if 
!(is32Bit(c + d)) { 14030 break 14031 } 14032 v.reset(OpAMD64MOVWstoreidx2) 14033 v.AuxInt = c + d 14034 v.Aux = sym 14035 v.AddArg(ptr) 14036 v.AddArg(idx) 14037 v.AddArg(val) 14038 v.AddArg(mem) 14039 return true 14040 } 14041 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 14042 // cond: is32Bit(c+2*d) 14043 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 14044 for { 14045 c := v.AuxInt 14046 sym := v.Aux 14047 _ = v.Args[3] 14048 ptr := v.Args[0] 14049 v_1 := v.Args[1] 14050 if v_1.Op != OpAMD64ADDQconst { 14051 break 14052 } 14053 d := v_1.AuxInt 14054 idx := v_1.Args[0] 14055 val := v.Args[2] 14056 mem := v.Args[3] 14057 if !(is32Bit(c + 2*d)) { 14058 break 14059 } 14060 v.reset(OpAMD64MOVWstoreidx2) 14061 v.AuxInt = c + 2*d 14062 v.Aux = sym 14063 v.AddArg(ptr) 14064 v.AddArg(idx) 14065 v.AddArg(val) 14066 v.AddArg(mem) 14067 return true 14068 } 14069 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 14070 // cond: x.Uses == 1 && clobber(x) 14071 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 14072 for { 14073 i := v.AuxInt 14074 s := v.Aux 14075 _ = v.Args[3] 14076 p := v.Args[0] 14077 idx := v.Args[1] 14078 v_2 := v.Args[2] 14079 if v_2.Op != OpAMD64SHRQconst { 14080 break 14081 } 14082 if v_2.AuxInt != 16 { 14083 break 14084 } 14085 w := v_2.Args[0] 14086 x := v.Args[3] 14087 if x.Op != OpAMD64MOVWstoreidx2 { 14088 break 14089 } 14090 if x.AuxInt != i-2 { 14091 break 14092 } 14093 if x.Aux != s { 14094 break 14095 } 14096 _ = x.Args[3] 14097 if p != x.Args[0] { 14098 break 14099 } 14100 if idx != x.Args[1] { 14101 break 14102 } 14103 if w != x.Args[2] { 14104 break 14105 } 14106 mem := x.Args[3] 14107 if !(x.Uses == 1 && clobber(x)) { 14108 break 14109 } 14110 v.reset(OpAMD64MOVLstoreidx1) 14111 v.AuxInt = i - 2 14112 v.Aux = s 14113 v.AddArg(p) 14114 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 14115 v0.AuxInt = 1 14116 v0.AddArg(idx) 14117 v.AddArg(v0) 14118 v.AddArg(w) 14119 v.AddArg(mem) 14120 return true 14121 } 14122 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 14123 // cond: x.Uses == 1 && clobber(x) 14124 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 14125 for { 14126 i := v.AuxInt 14127 s := v.Aux 14128 _ = v.Args[3] 14129 p := v.Args[0] 14130 idx := v.Args[1] 14131 v_2 := v.Args[2] 14132 if v_2.Op != OpAMD64SHRQconst { 14133 break 14134 } 14135 j := v_2.AuxInt 14136 w := v_2.Args[0] 14137 x := v.Args[3] 14138 if x.Op != OpAMD64MOVWstoreidx2 { 14139 break 14140 } 14141 if x.AuxInt != i-2 { 14142 break 14143 } 14144 if x.Aux != s { 14145 break 14146 } 14147 _ = x.Args[3] 14148 if p != x.Args[0] { 14149 break 14150 } 14151 if idx != x.Args[1] { 14152 break 14153 } 14154 w0 := x.Args[2] 14155 if w0.Op != OpAMD64SHRQconst { 14156 break 14157 } 14158 if w0.AuxInt != j-16 { 14159 break 14160 } 14161 if w != w0.Args[0] { 14162 break 14163 } 14164 mem := x.Args[3] 14165 if !(x.Uses == 1 && clobber(x)) { 14166 break 14167 } 14168 v.reset(OpAMD64MOVLstoreidx1) 14169 v.AuxInt = i - 2 14170 v.Aux = s 14171 v.AddArg(p) 14172 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 14173 v0.AuxInt = 1 14174 v0.AddArg(idx) 14175 v.AddArg(v0) 14176 v.AddArg(w0) 14177 v.AddArg(mem) 14178 return true 14179 } 14180 return false 14181 } 14182 func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { 14183 // match: (MULL x (MOVLconst [c])) 14184 // cond: 14185 // result: (MULLconst 
[c] x) 14186 for { 14187 _ = v.Args[1] 14188 x := v.Args[0] 14189 v_1 := v.Args[1] 14190 if v_1.Op != OpAMD64MOVLconst { 14191 break 14192 } 14193 c := v_1.AuxInt 14194 v.reset(OpAMD64MULLconst) 14195 v.AuxInt = c 14196 v.AddArg(x) 14197 return true 14198 } 14199 // match: (MULL (MOVLconst [c]) x) 14200 // cond: 14201 // result: (MULLconst [c] x) 14202 for { 14203 _ = v.Args[1] 14204 v_0 := v.Args[0] 14205 if v_0.Op != OpAMD64MOVLconst { 14206 break 14207 } 14208 c := v_0.AuxInt 14209 x := v.Args[1] 14210 v.reset(OpAMD64MULLconst) 14211 v.AuxInt = c 14212 v.AddArg(x) 14213 return true 14214 } 14215 return false 14216 } 14217 func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { 14218 // match: (MULLconst [c] (MULLconst [d] x)) 14219 // cond: 14220 // result: (MULLconst [int64(int32(c * d))] x) 14221 for { 14222 c := v.AuxInt 14223 v_0 := v.Args[0] 14224 if v_0.Op != OpAMD64MULLconst { 14225 break 14226 } 14227 d := v_0.AuxInt 14228 x := v_0.Args[0] 14229 v.reset(OpAMD64MULLconst) 14230 v.AuxInt = int64(int32(c * d)) 14231 v.AddArg(x) 14232 return true 14233 } 14234 // match: (MULLconst [c] (MOVLconst [d])) 14235 // cond: 14236 // result: (MOVLconst [int64(int32(c*d))]) 14237 for { 14238 c := v.AuxInt 14239 v_0 := v.Args[0] 14240 if v_0.Op != OpAMD64MOVLconst { 14241 break 14242 } 14243 d := v_0.AuxInt 14244 v.reset(OpAMD64MOVLconst) 14245 v.AuxInt = int64(int32(c * d)) 14246 return true 14247 } 14248 return false 14249 } 14250 func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { 14251 // match: (MULQ x (MOVQconst [c])) 14252 // cond: is32Bit(c) 14253 // result: (MULQconst [c] x) 14254 for { 14255 _ = v.Args[1] 14256 x := v.Args[0] 14257 v_1 := v.Args[1] 14258 if v_1.Op != OpAMD64MOVQconst { 14259 break 14260 } 14261 c := v_1.AuxInt 14262 if !(is32Bit(c)) { 14263 break 14264 } 14265 v.reset(OpAMD64MULQconst) 14266 v.AuxInt = c 14267 v.AddArg(x) 14268 return true 14269 } 14270 // match: (MULQ (MOVQconst [c]) x) 14271 // cond: is32Bit(c) 14272 // result: (MULQconst [c] x) 14273 for { 14274 _ = v.Args[1] 14275 v_0 := v.Args[0] 14276 if v_0.Op != OpAMD64MOVQconst { 14277 break 14278 } 14279 c := v_0.AuxInt 14280 x := v.Args[1] 14281 if !(is32Bit(c)) { 14282 break 14283 } 14284 v.reset(OpAMD64MULQconst) 14285 v.AuxInt = c 14286 v.AddArg(x) 14287 return true 14288 } 14289 return false 14290 } 14291 func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { 14292 b := v.Block 14293 _ = b 14294 // match: (MULQconst [c] (MULQconst [d] x)) 14295 // cond: is32Bit(c*d) 14296 // result: (MULQconst [c * d] x) 14297 for { 14298 c := v.AuxInt 14299 v_0 := v.Args[0] 14300 if v_0.Op != OpAMD64MULQconst { 14301 break 14302 } 14303 d := v_0.AuxInt 14304 x := v_0.Args[0] 14305 if !(is32Bit(c * d)) { 14306 break 14307 } 14308 v.reset(OpAMD64MULQconst) 14309 v.AuxInt = c * d 14310 v.AddArg(x) 14311 return true 14312 } 14313 // match: (MULQconst [-1] x) 14314 // cond: 14315 // result: (NEGQ x) 14316 for { 14317 if v.AuxInt != -1 { 14318 break 14319 } 14320 x := v.Args[0] 14321 v.reset(OpAMD64NEGQ) 14322 v.AddArg(x) 14323 return true 14324 } 14325 // match: (MULQconst [0] _) 14326 // cond: 14327 // result: (MOVQconst [0]) 14328 for { 14329 if v.AuxInt != 0 { 14330 break 14331 } 14332 v.reset(OpAMD64MOVQconst) 14333 v.AuxInt = 0 14334 return true 14335 } 14336 // match: (MULQconst [1] x) 14337 // cond: 14338 // result: x 14339 for { 14340 if v.AuxInt != 1 { 14341 break 14342 } 14343 x := v.Args[0] 14344 v.reset(OpCopy) 14345 v.Type = x.Type 14346 v.AddArg(x) 14347 return true 14348 } 14349 // match: (MULQconst 
[3] x) 14350 // cond: 14351 // result: (LEAQ2 x x) 14352 for { 14353 if v.AuxInt != 3 { 14354 break 14355 } 14356 x := v.Args[0] 14357 v.reset(OpAMD64LEAQ2) 14358 v.AddArg(x) 14359 v.AddArg(x) 14360 return true 14361 } 14362 // match: (MULQconst [5] x) 14363 // cond: 14364 // result: (LEAQ4 x x) 14365 for { 14366 if v.AuxInt != 5 { 14367 break 14368 } 14369 x := v.Args[0] 14370 v.reset(OpAMD64LEAQ4) 14371 v.AddArg(x) 14372 v.AddArg(x) 14373 return true 14374 } 14375 // match: (MULQconst [7] x) 14376 // cond: 14377 // result: (LEAQ8 (NEGQ <v.Type> x) x) 14378 for { 14379 if v.AuxInt != 7 { 14380 break 14381 } 14382 x := v.Args[0] 14383 v.reset(OpAMD64LEAQ8) 14384 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) 14385 v0.AddArg(x) 14386 v.AddArg(v0) 14387 v.AddArg(x) 14388 return true 14389 } 14390 // match: (MULQconst [9] x) 14391 // cond: 14392 // result: (LEAQ8 x x) 14393 for { 14394 if v.AuxInt != 9 { 14395 break 14396 } 14397 x := v.Args[0] 14398 v.reset(OpAMD64LEAQ8) 14399 v.AddArg(x) 14400 v.AddArg(x) 14401 return true 14402 } 14403 // match: (MULQconst [11] x) 14404 // cond: 14405 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 14406 for { 14407 if v.AuxInt != 11 { 14408 break 14409 } 14410 x := v.Args[0] 14411 v.reset(OpAMD64LEAQ2) 14412 v.AddArg(x) 14413 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14414 v0.AddArg(x) 14415 v0.AddArg(x) 14416 v.AddArg(v0) 14417 return true 14418 } 14419 // match: (MULQconst [13] x) 14420 // cond: 14421 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 14422 for { 14423 if v.AuxInt != 13 { 14424 break 14425 } 14426 x := v.Args[0] 14427 v.reset(OpAMD64LEAQ4) 14428 v.AddArg(x) 14429 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14430 v0.AddArg(x) 14431 v0.AddArg(x) 14432 v.AddArg(v0) 14433 return true 14434 } 14435 return false 14436 } 14437 func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { 14438 b := v.Block 14439 _ = b 14440 // match: (MULQconst [21] x) 14441 // cond: 14442 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 14443 for { 14444 if v.AuxInt != 21 { 14445 break 14446 } 14447 x := v.Args[0] 14448 v.reset(OpAMD64LEAQ4) 14449 v.AddArg(x) 14450 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14451 v0.AddArg(x) 14452 v0.AddArg(x) 14453 v.AddArg(v0) 14454 return true 14455 } 14456 // match: (MULQconst [25] x) 14457 // cond: 14458 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 14459 for { 14460 if v.AuxInt != 25 { 14461 break 14462 } 14463 x := v.Args[0] 14464 v.reset(OpAMD64LEAQ8) 14465 v.AddArg(x) 14466 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14467 v0.AddArg(x) 14468 v0.AddArg(x) 14469 v.AddArg(v0) 14470 return true 14471 } 14472 // match: (MULQconst [37] x) 14473 // cond: 14474 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 14475 for { 14476 if v.AuxInt != 37 { 14477 break 14478 } 14479 x := v.Args[0] 14480 v.reset(OpAMD64LEAQ4) 14481 v.AddArg(x) 14482 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14483 v0.AddArg(x) 14484 v0.AddArg(x) 14485 v.AddArg(v0) 14486 return true 14487 } 14488 // match: (MULQconst [41] x) 14489 // cond: 14490 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 14491 for { 14492 if v.AuxInt != 41 { 14493 break 14494 } 14495 x := v.Args[0] 14496 v.reset(OpAMD64LEAQ8) 14497 v.AddArg(x) 14498 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14499 v0.AddArg(x) 14500 v0.AddArg(x) 14501 v.AddArg(v0) 14502 return true 14503 } 14504 // match: (MULQconst [73] x) 14505 // cond: 14506 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 14507 for { 14508 if v.AuxInt != 73 { 14509 break 14510 } 14511 x := v.Args[0] 14512 v.reset(OpAMD64LEAQ8) 14513 v.AddArg(x) 14514 v0 
:= b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14515 v0.AddArg(x) 14516 v0.AddArg(x) 14517 v.AddArg(v0) 14518 return true 14519 } 14520 // match: (MULQconst [c] x) 14521 // cond: isPowerOfTwo(c+1) && c >= 15 14522 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 14523 for { 14524 c := v.AuxInt 14525 x := v.Args[0] 14526 if !(isPowerOfTwo(c+1) && c >= 15) { 14527 break 14528 } 14529 v.reset(OpAMD64SUBQ) 14530 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14531 v0.AuxInt = log2(c + 1) 14532 v0.AddArg(x) 14533 v.AddArg(v0) 14534 v.AddArg(x) 14535 return true 14536 } 14537 // match: (MULQconst [c] x) 14538 // cond: isPowerOfTwo(c-1) && c >= 17 14539 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 14540 for { 14541 c := v.AuxInt 14542 x := v.Args[0] 14543 if !(isPowerOfTwo(c-1) && c >= 17) { 14544 break 14545 } 14546 v.reset(OpAMD64LEAQ1) 14547 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14548 v0.AuxInt = log2(c - 1) 14549 v0.AddArg(x) 14550 v.AddArg(v0) 14551 v.AddArg(x) 14552 return true 14553 } 14554 // match: (MULQconst [c] x) 14555 // cond: isPowerOfTwo(c-2) && c >= 34 14556 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 14557 for { 14558 c := v.AuxInt 14559 x := v.Args[0] 14560 if !(isPowerOfTwo(c-2) && c >= 34) { 14561 break 14562 } 14563 v.reset(OpAMD64LEAQ2) 14564 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14565 v0.AuxInt = log2(c - 2) 14566 v0.AddArg(x) 14567 v.AddArg(v0) 14568 v.AddArg(x) 14569 return true 14570 } 14571 // match: (MULQconst [c] x) 14572 // cond: isPowerOfTwo(c-4) && c >= 68 14573 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 14574 for { 14575 c := v.AuxInt 14576 x := v.Args[0] 14577 if !(isPowerOfTwo(c-4) && c >= 68) { 14578 break 14579 } 14580 v.reset(OpAMD64LEAQ4) 14581 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14582 v0.AuxInt = log2(c - 4) 14583 v0.AddArg(x) 14584 v.AddArg(v0) 14585 v.AddArg(x) 14586 return true 14587 } 14588 // match: (MULQconst [c] x) 14589 // cond: isPowerOfTwo(c-8) && c >= 136 14590 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 14591 for { 14592 c := v.AuxInt 14593 x := v.Args[0] 14594 if !(isPowerOfTwo(c-8) && c >= 136) { 14595 break 14596 } 14597 v.reset(OpAMD64LEAQ8) 14598 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14599 v0.AuxInt = log2(c - 8) 14600 v0.AddArg(x) 14601 v.AddArg(v0) 14602 v.AddArg(x) 14603 return true 14604 } 14605 return false 14606 } 14607 func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { 14608 b := v.Block 14609 _ = b 14610 // match: (MULQconst [c] x) 14611 // cond: c%3 == 0 && isPowerOfTwo(c/3) 14612 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 14613 for { 14614 c := v.AuxInt 14615 x := v.Args[0] 14616 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 14617 break 14618 } 14619 v.reset(OpAMD64SHLQconst) 14620 v.AuxInt = log2(c / 3) 14621 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14622 v0.AddArg(x) 14623 v0.AddArg(x) 14624 v.AddArg(v0) 14625 return true 14626 } 14627 // match: (MULQconst [c] x) 14628 // cond: c%5 == 0 && isPowerOfTwo(c/5) 14629 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 14630 for { 14631 c := v.AuxInt 14632 x := v.Args[0] 14633 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 14634 break 14635 } 14636 v.reset(OpAMD64SHLQconst) 14637 v.AuxInt = log2(c / 5) 14638 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14639 v0.AddArg(x) 14640 v0.AddArg(x) 14641 v.AddArg(v0) 14642 return true 14643 } 14644 // match: (MULQconst [c] x) 14645 // cond: c%9 == 0 && isPowerOfTwo(c/9) 14646 // result: (SHLQconst [log2(c/9)] (LEAQ8 
<v.Type> x x)) 14647 for { 14648 c := v.AuxInt 14649 x := v.Args[0] 14650 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 14651 break 14652 } 14653 v.reset(OpAMD64SHLQconst) 14654 v.AuxInt = log2(c / 9) 14655 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14656 v0.AddArg(x) 14657 v0.AddArg(x) 14658 v.AddArg(v0) 14659 return true 14660 } 14661 // match: (MULQconst [c] (MOVQconst [d])) 14662 // cond: 14663 // result: (MOVQconst [c*d]) 14664 for { 14665 c := v.AuxInt 14666 v_0 := v.Args[0] 14667 if v_0.Op != OpAMD64MOVQconst { 14668 break 14669 } 14670 d := v_0.AuxInt 14671 v.reset(OpAMD64MOVQconst) 14672 v.AuxInt = c * d 14673 return true 14674 } 14675 return false 14676 } 14677 func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { 14678 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) 14679 // cond: canMergeLoad(v, l, x) && clobber(l) 14680 // result: (MULSDmem x [off] {sym} ptr mem) 14681 for { 14682 _ = v.Args[1] 14683 x := v.Args[0] 14684 l := v.Args[1] 14685 if l.Op != OpAMD64MOVSDload { 14686 break 14687 } 14688 off := l.AuxInt 14689 sym := l.Aux 14690 _ = l.Args[1] 14691 ptr := l.Args[0] 14692 mem := l.Args[1] 14693 if !(canMergeLoad(v, l, x) && clobber(l)) { 14694 break 14695 } 14696 v.reset(OpAMD64MULSDmem) 14697 v.AuxInt = off 14698 v.Aux = sym 14699 v.AddArg(x) 14700 v.AddArg(ptr) 14701 v.AddArg(mem) 14702 return true 14703 } 14704 // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) 14705 // cond: canMergeLoad(v, l, x) && clobber(l) 14706 // result: (MULSDmem x [off] {sym} ptr mem) 14707 for { 14708 _ = v.Args[1] 14709 l := v.Args[0] 14710 if l.Op != OpAMD64MOVSDload { 14711 break 14712 } 14713 off := l.AuxInt 14714 sym := l.Aux 14715 _ = l.Args[1] 14716 ptr := l.Args[0] 14717 mem := l.Args[1] 14718 x := v.Args[1] 14719 if !(canMergeLoad(v, l, x) && clobber(l)) { 14720 break 14721 } 14722 v.reset(OpAMD64MULSDmem) 14723 v.AuxInt = off 14724 v.Aux = sym 14725 v.AddArg(x) 14726 v.AddArg(ptr) 14727 v.AddArg(mem) 14728 return true 14729 } 14730 return false 14731 } 14732 func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool { 14733 b := v.Block 14734 _ = b 14735 typ := &b.Func.Config.Types 14736 _ = typ 14737 // match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 14738 // cond: 14739 // result: (MULSD x (MOVQi2f y)) 14740 for { 14741 off := v.AuxInt 14742 sym := v.Aux 14743 _ = v.Args[2] 14744 x := v.Args[0] 14745 ptr := v.Args[1] 14746 v_2 := v.Args[2] 14747 if v_2.Op != OpAMD64MOVQstore { 14748 break 14749 } 14750 if v_2.AuxInt != off { 14751 break 14752 } 14753 if v_2.Aux != sym { 14754 break 14755 } 14756 _ = v_2.Args[2] 14757 if ptr != v_2.Args[0] { 14758 break 14759 } 14760 y := v_2.Args[1] 14761 v.reset(OpAMD64MULSD) 14762 v.AddArg(x) 14763 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 14764 v0.AddArg(y) 14765 v.AddArg(v0) 14766 return true 14767 } 14768 return false 14769 } 14770 func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { 14771 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) 14772 // cond: canMergeLoad(v, l, x) && clobber(l) 14773 // result: (MULSSmem x [off] {sym} ptr mem) 14774 for { 14775 _ = v.Args[1] 14776 x := v.Args[0] 14777 l := v.Args[1] 14778 if l.Op != OpAMD64MOVSSload { 14779 break 14780 } 14781 off := l.AuxInt 14782 sym := l.Aux 14783 _ = l.Args[1] 14784 ptr := l.Args[0] 14785 mem := l.Args[1] 14786 if !(canMergeLoad(v, l, x) && clobber(l)) { 14787 break 14788 } 14789 v.reset(OpAMD64MULSSmem) 14790 v.AuxInt = off 14791 v.Aux = sym 14792 v.AddArg(x) 14793 v.AddArg(ptr) 14794 v.AddArg(mem) 14795 return true 14796 } 14797 // 
	// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (MULSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool {
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool {
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool {
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool {
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool {
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
	// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
	// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
17922 } 17923 if v_0_0_0_0_0_0.AuxInt != 7 { 17924 break 17925 } 17926 y := v_0_0_0_0_0_0.Args[0] 17927 v_0_1 := v_0.Args[1] 17928 if v_0_1.Op != OpAMD64SHRB { 17929 break 17930 } 17931 _ = v_0_1.Args[1] 17932 x := v_0_1.Args[0] 17933 v_0_1_1 := v_0_1.Args[1] 17934 if v_0_1_1.Op != OpAMD64NEGL { 17935 break 17936 } 17937 v_0_1_1_0 := v_0_1_1.Args[0] 17938 if v_0_1_1_0.Op != OpAMD64ADDLconst { 17939 break 17940 } 17941 if v_0_1_1_0.AuxInt != -8 { 17942 break 17943 } 17944 v_0_1_1_0_0 := v_0_1_1_0.Args[0] 17945 if v_0_1_1_0_0.Op != OpAMD64ANDLconst { 17946 break 17947 } 17948 if v_0_1_1_0_0.AuxInt != 7 { 17949 break 17950 } 17951 if y != v_0_1_1_0_0.Args[0] { 17952 break 17953 } 17954 v_1 := v.Args[1] 17955 if v_1.Op != OpAMD64SHLL { 17956 break 17957 } 17958 _ = v_1.Args[1] 17959 if x != v_1.Args[0] { 17960 break 17961 } 17962 v_1_1 := v_1.Args[1] 17963 if v_1_1.Op != OpAMD64ANDLconst { 17964 break 17965 } 17966 if v_1_1.AuxInt != 7 { 17967 break 17968 } 17969 if y != v_1_1.Args[0] { 17970 break 17971 } 17972 if !(v.Type.Size() == 1) { 17973 break 17974 } 17975 v.reset(OpAMD64ROLB) 17976 v.AddArg(x) 17977 v.AddArg(y) 17978 return true 17979 } 17980 // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) 17981 // cond: v.Type.Size() == 1 17982 // result: (RORB x y) 17983 for { 17984 _ = v.Args[1] 17985 v_0 := v.Args[0] 17986 if v_0.Op != OpAMD64SHRB { 17987 break 17988 } 17989 _ = v_0.Args[1] 17990 x := v_0.Args[0] 17991 v_0_1 := v_0.Args[1] 17992 if v_0_1.Op != OpAMD64ANDQconst { 17993 break 17994 } 17995 if v_0_1.AuxInt != 7 { 17996 break 17997 } 17998 y := v_0_1.Args[0] 17999 v_1 := v.Args[1] 18000 if v_1.Op != OpAMD64SHLL { 18001 break 18002 } 18003 _ = v_1.Args[1] 18004 if x != v_1.Args[0] { 18005 break 18006 } 18007 v_1_1 := v_1.Args[1] 18008 if v_1_1.Op != OpAMD64NEGQ { 18009 break 18010 } 18011 v_1_1_0 := v_1_1.Args[0] 18012 if v_1_1_0.Op != OpAMD64ADDQconst { 18013 break 18014 } 18015 if v_1_1_0.AuxInt != -8 { 18016 break 18017 } 18018 v_1_1_0_0 := v_1_1_0.Args[0] 18019 if v_1_1_0_0.Op != OpAMD64ANDQconst { 18020 break 18021 } 18022 if v_1_1_0_0.AuxInt != 7 { 18023 break 18024 } 18025 if y != v_1_1_0_0.Args[0] { 18026 break 18027 } 18028 if !(v.Type.Size() == 1) { 18029 break 18030 } 18031 v.reset(OpAMD64RORB) 18032 v.AddArg(x) 18033 v.AddArg(y) 18034 return true 18035 } 18036 // match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7]))) 18037 // cond: v.Type.Size() == 1 18038 // result: (RORB x y) 18039 for { 18040 _ = v.Args[1] 18041 v_0 := v.Args[0] 18042 if v_0.Op != OpAMD64SHLL { 18043 break 18044 } 18045 _ = v_0.Args[1] 18046 x := v_0.Args[0] 18047 v_0_1 := v_0.Args[1] 18048 if v_0_1.Op != OpAMD64NEGQ { 18049 break 18050 } 18051 v_0_1_0 := v_0_1.Args[0] 18052 if v_0_1_0.Op != OpAMD64ADDQconst { 18053 break 18054 } 18055 if v_0_1_0.AuxInt != -8 { 18056 break 18057 } 18058 v_0_1_0_0 := v_0_1_0.Args[0] 18059 if v_0_1_0_0.Op != OpAMD64ANDQconst { 18060 break 18061 } 18062 if v_0_1_0_0.AuxInt != 7 { 18063 break 18064 } 18065 y := v_0_1_0_0.Args[0] 18066 v_1 := v.Args[1] 18067 if v_1.Op != OpAMD64SHRB { 18068 break 18069 } 18070 _ = v_1.Args[1] 18071 if x != v_1.Args[0] { 18072 break 18073 } 18074 v_1_1 := v_1.Args[1] 18075 if v_1_1.Op != OpAMD64ANDQconst { 18076 break 18077 } 18078 if v_1_1.AuxInt != 7 { 18079 break 18080 } 18081 if y != v_1_1.Args[0] { 18082 break 18083 } 18084 if !(v.Type.Size() == 1) { 18085 break 18086 } 18087 v.reset(OpAMD64RORB) 18088 v.AddArg(x) 18089 v.AddArg(y) 18090 return true 
18091 } 18092 // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) 18093 // cond: v.Type.Size() == 1 18094 // result: (RORB x y) 18095 for { 18096 _ = v.Args[1] 18097 v_0 := v.Args[0] 18098 if v_0.Op != OpAMD64SHRB { 18099 break 18100 } 18101 _ = v_0.Args[1] 18102 x := v_0.Args[0] 18103 v_0_1 := v_0.Args[1] 18104 if v_0_1.Op != OpAMD64ANDLconst { 18105 break 18106 } 18107 if v_0_1.AuxInt != 7 { 18108 break 18109 } 18110 y := v_0_1.Args[0] 18111 v_1 := v.Args[1] 18112 if v_1.Op != OpAMD64SHLL { 18113 break 18114 } 18115 _ = v_1.Args[1] 18116 if x != v_1.Args[0] { 18117 break 18118 } 18119 v_1_1 := v_1.Args[1] 18120 if v_1_1.Op != OpAMD64NEGL { 18121 break 18122 } 18123 v_1_1_0 := v_1_1.Args[0] 18124 if v_1_1_0.Op != OpAMD64ADDLconst { 18125 break 18126 } 18127 if v_1_1_0.AuxInt != -8 { 18128 break 18129 } 18130 v_1_1_0_0 := v_1_1_0.Args[0] 18131 if v_1_1_0_0.Op != OpAMD64ANDLconst { 18132 break 18133 } 18134 if v_1_1_0_0.AuxInt != 7 { 18135 break 18136 } 18137 if y != v_1_1_0_0.Args[0] { 18138 break 18139 } 18140 if !(v.Type.Size() == 1) { 18141 break 18142 } 18143 v.reset(OpAMD64RORB) 18144 v.AddArg(x) 18145 v.AddArg(y) 18146 return true 18147 } 18148 // match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7]))) 18149 // cond: v.Type.Size() == 1 18150 // result: (RORB x y) 18151 for { 18152 _ = v.Args[1] 18153 v_0 := v.Args[0] 18154 if v_0.Op != OpAMD64SHLL { 18155 break 18156 } 18157 _ = v_0.Args[1] 18158 x := v_0.Args[0] 18159 v_0_1 := v_0.Args[1] 18160 if v_0_1.Op != OpAMD64NEGL { 18161 break 18162 } 18163 v_0_1_0 := v_0_1.Args[0] 18164 if v_0_1_0.Op != OpAMD64ADDLconst { 18165 break 18166 } 18167 if v_0_1_0.AuxInt != -8 { 18168 break 18169 } 18170 v_0_1_0_0 := v_0_1_0.Args[0] 18171 if v_0_1_0_0.Op != OpAMD64ANDLconst { 18172 break 18173 } 18174 if v_0_1_0_0.AuxInt != 7 { 18175 break 18176 } 18177 y := v_0_1_0_0.Args[0] 18178 v_1 := v.Args[1] 18179 if v_1.Op != OpAMD64SHRB { 18180 break 18181 } 18182 _ = v_1.Args[1] 18183 if x != v_1.Args[0] { 18184 break 18185 } 18186 v_1_1 := v_1.Args[1] 18187 if v_1_1.Op != OpAMD64ANDLconst { 18188 break 18189 } 18190 if v_1_1.AuxInt != 7 { 18191 break 18192 } 18193 if y != v_1_1.Args[0] { 18194 break 18195 } 18196 if !(v.Type.Size() == 1) { 18197 break 18198 } 18199 v.reset(OpAMD64RORB) 18200 v.AddArg(x) 18201 v.AddArg(y) 18202 return true 18203 } 18204 // match: (ORL x x) 18205 // cond: 18206 // result: x 18207 for { 18208 _ = v.Args[1] 18209 x := v.Args[0] 18210 if x != v.Args[1] { 18211 break 18212 } 18213 v.reset(OpCopy) 18214 v.Type = x.Type 18215 v.AddArg(x) 18216 return true 18217 } 18218 // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) 18219 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18220 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 18221 for { 18222 _ = v.Args[1] 18223 x0 := v.Args[0] 18224 if x0.Op != OpAMD64MOVBload { 18225 break 18226 } 18227 i0 := x0.AuxInt 18228 s := x0.Aux 18229 _ = x0.Args[1] 18230 p := x0.Args[0] 18231 mem := x0.Args[1] 18232 sh := v.Args[1] 18233 if sh.Op != OpAMD64SHLLconst { 18234 break 18235 } 18236 if sh.AuxInt != 8 { 18237 break 18238 } 18239 x1 := sh.Args[0] 18240 if x1.Op != OpAMD64MOVBload { 18241 break 18242 } 18243 i1 := x1.AuxInt 18244 if x1.Aux != s { 18245 break 18246 } 18247 _ = x1.Args[1] 18248 if p != x1.Args[0] { 18249 break 18250 } 18251 if mem != x1.Args[1] { 
18252 break 18253 } 18254 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18255 break 18256 } 18257 b = mergePoint(b, x0, x1) 18258 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18259 v.reset(OpCopy) 18260 v.AddArg(v0) 18261 v0.AuxInt = i0 18262 v0.Aux = s 18263 v0.AddArg(p) 18264 v0.AddArg(mem) 18265 return true 18266 } 18267 return false 18268 } 18269 func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { 18270 b := v.Block 18271 _ = b 18272 typ := &b.Func.Config.Types 18273 _ = typ 18274 // match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 18275 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18276 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 18277 for { 18278 _ = v.Args[1] 18279 sh := v.Args[0] 18280 if sh.Op != OpAMD64SHLLconst { 18281 break 18282 } 18283 if sh.AuxInt != 8 { 18284 break 18285 } 18286 x1 := sh.Args[0] 18287 if x1.Op != OpAMD64MOVBload { 18288 break 18289 } 18290 i1 := x1.AuxInt 18291 s := x1.Aux 18292 _ = x1.Args[1] 18293 p := x1.Args[0] 18294 mem := x1.Args[1] 18295 x0 := v.Args[1] 18296 if x0.Op != OpAMD64MOVBload { 18297 break 18298 } 18299 i0 := x0.AuxInt 18300 if x0.Aux != s { 18301 break 18302 } 18303 _ = x0.Args[1] 18304 if p != x0.Args[0] { 18305 break 18306 } 18307 if mem != x0.Args[1] { 18308 break 18309 } 18310 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18311 break 18312 } 18313 b = mergePoint(b, x0, x1) 18314 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18315 v.reset(OpCopy) 18316 v.AddArg(v0) 18317 v0.AuxInt = i0 18318 v0.Aux = s 18319 v0.AddArg(p) 18320 v0.AddArg(mem) 18321 return true 18322 } 18323 // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) 18324 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18325 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 18326 for { 18327 _ = v.Args[1] 18328 x0 := v.Args[0] 18329 if x0.Op != OpAMD64MOVWload { 18330 break 18331 } 18332 i0 := x0.AuxInt 18333 s := x0.Aux 18334 _ = x0.Args[1] 18335 p := x0.Args[0] 18336 mem := x0.Args[1] 18337 sh := v.Args[1] 18338 if sh.Op != OpAMD64SHLLconst { 18339 break 18340 } 18341 if sh.AuxInt != 16 { 18342 break 18343 } 18344 x1 := sh.Args[0] 18345 if x1.Op != OpAMD64MOVWload { 18346 break 18347 } 18348 i1 := x1.AuxInt 18349 if x1.Aux != s { 18350 break 18351 } 18352 _ = x1.Args[1] 18353 if p != x1.Args[0] { 18354 break 18355 } 18356 if mem != x1.Args[1] { 18357 break 18358 } 18359 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18360 break 18361 } 18362 b = mergePoint(b, x0, x1) 18363 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 18364 v.reset(OpCopy) 18365 v.AddArg(v0) 18366 v0.AuxInt = i0 18367 v0.Aux = s 18368 v0.AddArg(p) 18369 v0.AddArg(mem) 18370 return true 18371 } 18372 // match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) 18373 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18374 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 
18375 for { 18376 _ = v.Args[1] 18377 sh := v.Args[0] 18378 if sh.Op != OpAMD64SHLLconst { 18379 break 18380 } 18381 if sh.AuxInt != 16 { 18382 break 18383 } 18384 x1 := sh.Args[0] 18385 if x1.Op != OpAMD64MOVWload { 18386 break 18387 } 18388 i1 := x1.AuxInt 18389 s := x1.Aux 18390 _ = x1.Args[1] 18391 p := x1.Args[0] 18392 mem := x1.Args[1] 18393 x0 := v.Args[1] 18394 if x0.Op != OpAMD64MOVWload { 18395 break 18396 } 18397 i0 := x0.AuxInt 18398 if x0.Aux != s { 18399 break 18400 } 18401 _ = x0.Args[1] 18402 if p != x0.Args[0] { 18403 break 18404 } 18405 if mem != x0.Args[1] { 18406 break 18407 } 18408 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18409 break 18410 } 18411 b = mergePoint(b, x0, x1) 18412 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 18413 v.reset(OpCopy) 18414 v.AddArg(v0) 18415 v0.AuxInt = i0 18416 v0.Aux = s 18417 v0.AddArg(p) 18418 v0.AddArg(mem) 18419 return true 18420 } 18421 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 18422 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18423 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18424 for { 18425 _ = v.Args[1] 18426 s1 := v.Args[0] 18427 if s1.Op != OpAMD64SHLLconst { 18428 break 18429 } 18430 j1 := s1.AuxInt 18431 x1 := s1.Args[0] 18432 if x1.Op != OpAMD64MOVBload { 18433 break 18434 } 18435 i1 := x1.AuxInt 18436 s := x1.Aux 18437 _ = x1.Args[1] 18438 p := x1.Args[0] 18439 mem := x1.Args[1] 18440 or := v.Args[1] 18441 if or.Op != OpAMD64ORL { 18442 break 18443 } 18444 _ = or.Args[1] 18445 s0 := or.Args[0] 18446 if s0.Op != OpAMD64SHLLconst { 18447 break 18448 } 18449 j0 := s0.AuxInt 18450 x0 := s0.Args[0] 18451 if x0.Op != OpAMD64MOVBload { 18452 break 18453 } 18454 i0 := x0.AuxInt 18455 if x0.Aux != s { 18456 break 18457 } 18458 _ = x0.Args[1] 18459 if p != x0.Args[0] { 18460 break 18461 } 18462 if mem != x0.Args[1] { 18463 break 18464 } 18465 y := or.Args[1] 18466 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18467 break 18468 } 18469 b = mergePoint(b, x0, x1) 18470 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18471 v.reset(OpCopy) 18472 v.AddArg(v0) 18473 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18474 v1.AuxInt = j0 18475 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18476 v2.AuxInt = i0 18477 v2.Aux = s 18478 v2.AddArg(p) 18479 v2.AddArg(mem) 18480 v1.AddArg(v2) 18481 v0.AddArg(v1) 18482 v0.AddArg(y) 18483 return true 18484 } 18485 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))) 18486 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18487 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18488 for { 18489 _ = v.Args[1] 18490 s1 := v.Args[0] 18491 if s1.Op != OpAMD64SHLLconst { 18492 break 
18493 } 18494 j1 := s1.AuxInt 18495 x1 := s1.Args[0] 18496 if x1.Op != OpAMD64MOVBload { 18497 break 18498 } 18499 i1 := x1.AuxInt 18500 s := x1.Aux 18501 _ = x1.Args[1] 18502 p := x1.Args[0] 18503 mem := x1.Args[1] 18504 or := v.Args[1] 18505 if or.Op != OpAMD64ORL { 18506 break 18507 } 18508 _ = or.Args[1] 18509 y := or.Args[0] 18510 s0 := or.Args[1] 18511 if s0.Op != OpAMD64SHLLconst { 18512 break 18513 } 18514 j0 := s0.AuxInt 18515 x0 := s0.Args[0] 18516 if x0.Op != OpAMD64MOVBload { 18517 break 18518 } 18519 i0 := x0.AuxInt 18520 if x0.Aux != s { 18521 break 18522 } 18523 _ = x0.Args[1] 18524 if p != x0.Args[0] { 18525 break 18526 } 18527 if mem != x0.Args[1] { 18528 break 18529 } 18530 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18531 break 18532 } 18533 b = mergePoint(b, x0, x1) 18534 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18535 v.reset(OpCopy) 18536 v.AddArg(v0) 18537 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18538 v1.AuxInt = j0 18539 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18540 v2.AuxInt = i0 18541 v2.Aux = s 18542 v2.AddArg(p) 18543 v2.AddArg(mem) 18544 v1.AddArg(v2) 18545 v0.AddArg(v1) 18546 v0.AddArg(y) 18547 return true 18548 } 18549 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 18550 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18551 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18552 for { 18553 _ = v.Args[1] 18554 or := v.Args[0] 18555 if or.Op != OpAMD64ORL { 18556 break 18557 } 18558 _ = or.Args[1] 18559 s0 := or.Args[0] 18560 if s0.Op != OpAMD64SHLLconst { 18561 break 18562 } 18563 j0 := s0.AuxInt 18564 x0 := s0.Args[0] 18565 if x0.Op != OpAMD64MOVBload { 18566 break 18567 } 18568 i0 := x0.AuxInt 18569 s := x0.Aux 18570 _ = x0.Args[1] 18571 p := x0.Args[0] 18572 mem := x0.Args[1] 18573 y := or.Args[1] 18574 s1 := v.Args[1] 18575 if s1.Op != OpAMD64SHLLconst { 18576 break 18577 } 18578 j1 := s1.AuxInt 18579 x1 := s1.Args[0] 18580 if x1.Op != OpAMD64MOVBload { 18581 break 18582 } 18583 i1 := x1.AuxInt 18584 if x1.Aux != s { 18585 break 18586 } 18587 _ = x1.Args[1] 18588 if p != x1.Args[0] { 18589 break 18590 } 18591 if mem != x1.Args[1] { 18592 break 18593 } 18594 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18595 break 18596 } 18597 b = mergePoint(b, x0, x1) 18598 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18599 v.reset(OpCopy) 18600 v.AddArg(v0) 18601 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18602 v1.AuxInt = j0 18603 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18604 v2.AuxInt = i0 18605 v2.Aux = s 18606 v2.AddArg(p) 18607 v2.AddArg(mem) 18608 v1.AddArg(v2) 18609 v0.AddArg(v1) 18610 v0.AddArg(y) 18611 return true 18612 } 18613 // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) 18614 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 18615 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 18616 for { 18617 _ = v.Args[1] 18618 or := v.Args[0] 18619 if or.Op != OpAMD64ORL { 18620 break 18621 } 18622 _ = or.Args[1] 18623 y := or.Args[0] 18624 s0 := or.Args[1] 18625 if s0.Op != OpAMD64SHLLconst { 18626 break 18627 } 18628 j0 := s0.AuxInt 18629 x0 := s0.Args[0] 18630 if x0.Op != OpAMD64MOVBload { 18631 break 18632 } 18633 i0 := x0.AuxInt 18634 s := x0.Aux 18635 _ = x0.Args[1] 18636 p := x0.Args[0] 18637 mem := x0.Args[1] 18638 s1 := v.Args[1] 18639 if s1.Op != OpAMD64SHLLconst { 18640 break 18641 } 18642 j1 := s1.AuxInt 18643 x1 := s1.Args[0] 18644 if x1.Op != OpAMD64MOVBload { 18645 break 18646 } 18647 i1 := x1.AuxInt 18648 if x1.Aux != s { 18649 break 18650 } 18651 _ = x1.Args[1] 18652 if p != x1.Args[0] { 18653 break 18654 } 18655 if mem != x1.Args[1] { 18656 break 18657 } 18658 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 18659 break 18660 } 18661 b = mergePoint(b, x0, x1) 18662 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 18663 v.reset(OpCopy) 18664 v.AddArg(v0) 18665 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 18666 v1.AuxInt = j0 18667 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 18668 v2.AuxInt = i0 18669 v2.Aux = s 18670 v2.AddArg(p) 18671 v2.AddArg(mem) 18672 v1.AddArg(v2) 18673 v0.AddArg(v1) 18674 v0.AddArg(y) 18675 return true 18676 } 18677 // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18678 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18679 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18680 for { 18681 _ = v.Args[1] 18682 x0 := v.Args[0] 18683 if x0.Op != OpAMD64MOVBloadidx1 { 18684 break 18685 } 18686 i0 := x0.AuxInt 18687 s := x0.Aux 18688 _ = x0.Args[2] 18689 p := x0.Args[0] 18690 idx := x0.Args[1] 18691 mem := x0.Args[2] 18692 sh := v.Args[1] 18693 if sh.Op != OpAMD64SHLLconst { 18694 break 18695 } 18696 if sh.AuxInt != 8 { 18697 break 18698 } 18699 x1 := sh.Args[0] 18700 if x1.Op != OpAMD64MOVBloadidx1 { 18701 break 18702 } 18703 i1 := x1.AuxInt 18704 if x1.Aux != s { 18705 break 18706 } 18707 _ = x1.Args[2] 18708 if p != x1.Args[0] { 18709 break 18710 } 18711 if idx != x1.Args[1] { 18712 break 18713 } 18714 if mem != x1.Args[2] { 18715 break 18716 } 18717 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18718 break 18719 } 18720 b = mergePoint(b, x0, x1) 18721 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18722 v.reset(OpCopy) 18723 v.AddArg(v0) 18724 v0.AuxInt = i0 18725 v0.Aux = s 18726 v0.AddArg(p) 18727 v0.AddArg(idx) 18728 v0.AddArg(mem) 18729 return true 18730 } 18731 // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 18732 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18733 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx 
mem) 18734 for { 18735 _ = v.Args[1] 18736 x0 := v.Args[0] 18737 if x0.Op != OpAMD64MOVBloadidx1 { 18738 break 18739 } 18740 i0 := x0.AuxInt 18741 s := x0.Aux 18742 _ = x0.Args[2] 18743 idx := x0.Args[0] 18744 p := x0.Args[1] 18745 mem := x0.Args[2] 18746 sh := v.Args[1] 18747 if sh.Op != OpAMD64SHLLconst { 18748 break 18749 } 18750 if sh.AuxInt != 8 { 18751 break 18752 } 18753 x1 := sh.Args[0] 18754 if x1.Op != OpAMD64MOVBloadidx1 { 18755 break 18756 } 18757 i1 := x1.AuxInt 18758 if x1.Aux != s { 18759 break 18760 } 18761 _ = x1.Args[2] 18762 if p != x1.Args[0] { 18763 break 18764 } 18765 if idx != x1.Args[1] { 18766 break 18767 } 18768 if mem != x1.Args[2] { 18769 break 18770 } 18771 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18772 break 18773 } 18774 b = mergePoint(b, x0, x1) 18775 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18776 v.reset(OpCopy) 18777 v.AddArg(v0) 18778 v0.AuxInt = i0 18779 v0.Aux = s 18780 v0.AddArg(p) 18781 v0.AddArg(idx) 18782 v0.AddArg(mem) 18783 return true 18784 } 18785 // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18786 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18787 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18788 for { 18789 _ = v.Args[1] 18790 x0 := v.Args[0] 18791 if x0.Op != OpAMD64MOVBloadidx1 { 18792 break 18793 } 18794 i0 := x0.AuxInt 18795 s := x0.Aux 18796 _ = x0.Args[2] 18797 p := x0.Args[0] 18798 idx := x0.Args[1] 18799 mem := x0.Args[2] 18800 sh := v.Args[1] 18801 if sh.Op != OpAMD64SHLLconst { 18802 break 18803 } 18804 if sh.AuxInt != 8 { 18805 break 18806 } 18807 x1 := sh.Args[0] 18808 if x1.Op != OpAMD64MOVBloadidx1 { 18809 break 18810 } 18811 i1 := x1.AuxInt 18812 if x1.Aux != s { 18813 break 18814 } 18815 _ = x1.Args[2] 18816 if idx != x1.Args[0] { 18817 break 18818 } 18819 if p != x1.Args[1] { 18820 break 18821 } 18822 if mem != x1.Args[2] { 18823 break 18824 } 18825 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18826 break 18827 } 18828 b = mergePoint(b, x0, x1) 18829 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18830 v.reset(OpCopy) 18831 v.AddArg(v0) 18832 v0.AuxInt = i0 18833 v0.Aux = s 18834 v0.AddArg(p) 18835 v0.AddArg(idx) 18836 v0.AddArg(mem) 18837 return true 18838 } 18839 return false 18840 } 18841 func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool { 18842 b := v.Block 18843 _ = b 18844 typ := &b.Func.Config.Types 18845 _ = typ 18846 // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 18847 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18848 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18849 for { 18850 _ = v.Args[1] 18851 x0 := v.Args[0] 18852 if x0.Op != OpAMD64MOVBloadidx1 { 18853 break 18854 } 18855 i0 := x0.AuxInt 18856 s := x0.Aux 18857 _ = x0.Args[2] 18858 idx := x0.Args[0] 18859 p := x0.Args[1] 18860 mem := x0.Args[2] 18861 sh := v.Args[1] 18862 if sh.Op != OpAMD64SHLLconst { 18863 break 18864 } 18865 if sh.AuxInt != 8 { 18866 break 18867 } 18868 x1 := sh.Args[0] 18869 if x1.Op != OpAMD64MOVBloadidx1 { 18870 break 18871 } 18872 i1 := 
x1.AuxInt 18873 if x1.Aux != s { 18874 break 18875 } 18876 _ = x1.Args[2] 18877 if idx != x1.Args[0] { 18878 break 18879 } 18880 if p != x1.Args[1] { 18881 break 18882 } 18883 if mem != x1.Args[2] { 18884 break 18885 } 18886 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18887 break 18888 } 18889 b = mergePoint(b, x0, x1) 18890 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18891 v.reset(OpCopy) 18892 v.AddArg(v0) 18893 v0.AuxInt = i0 18894 v0.Aux = s 18895 v0.AddArg(p) 18896 v0.AddArg(idx) 18897 v0.AddArg(mem) 18898 return true 18899 } 18900 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 18901 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18902 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18903 for { 18904 _ = v.Args[1] 18905 sh := v.Args[0] 18906 if sh.Op != OpAMD64SHLLconst { 18907 break 18908 } 18909 if sh.AuxInt != 8 { 18910 break 18911 } 18912 x1 := sh.Args[0] 18913 if x1.Op != OpAMD64MOVBloadidx1 { 18914 break 18915 } 18916 i1 := x1.AuxInt 18917 s := x1.Aux 18918 _ = x1.Args[2] 18919 p := x1.Args[0] 18920 idx := x1.Args[1] 18921 mem := x1.Args[2] 18922 x0 := v.Args[1] 18923 if x0.Op != OpAMD64MOVBloadidx1 { 18924 break 18925 } 18926 i0 := x0.AuxInt 18927 if x0.Aux != s { 18928 break 18929 } 18930 _ = x0.Args[2] 18931 if p != x0.Args[0] { 18932 break 18933 } 18934 if idx != x0.Args[1] { 18935 break 18936 } 18937 if mem != x0.Args[2] { 18938 break 18939 } 18940 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18941 break 18942 } 18943 b = mergePoint(b, x0, x1) 18944 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18945 v.reset(OpCopy) 18946 v.AddArg(v0) 18947 v0.AuxInt = i0 18948 v0.Aux = s 18949 v0.AddArg(p) 18950 v0.AddArg(idx) 18951 v0.AddArg(mem) 18952 return true 18953 } 18954 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 18955 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 18956 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 18957 for { 18958 _ = v.Args[1] 18959 sh := v.Args[0] 18960 if sh.Op != OpAMD64SHLLconst { 18961 break 18962 } 18963 if sh.AuxInt != 8 { 18964 break 18965 } 18966 x1 := sh.Args[0] 18967 if x1.Op != OpAMD64MOVBloadidx1 { 18968 break 18969 } 18970 i1 := x1.AuxInt 18971 s := x1.Aux 18972 _ = x1.Args[2] 18973 idx := x1.Args[0] 18974 p := x1.Args[1] 18975 mem := x1.Args[2] 18976 x0 := v.Args[1] 18977 if x0.Op != OpAMD64MOVBloadidx1 { 18978 break 18979 } 18980 i0 := x0.AuxInt 18981 if x0.Aux != s { 18982 break 18983 } 18984 _ = x0.Args[2] 18985 if p != x0.Args[0] { 18986 break 18987 } 18988 if idx != x0.Args[1] { 18989 break 18990 } 18991 if mem != x0.Args[2] { 18992 break 18993 } 18994 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 18995 break 18996 } 18997 b = mergePoint(b, x0, x1) 18998 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 18999 v.reset(OpCopy) 19000 v.AddArg(v0) 19001 v0.AuxInt = i0 19002 v0.Aux = s 19003 v0.AddArg(p) 19004 v0.AddArg(idx) 19005 v0.AddArg(mem) 19006 return true 
19007 } 19008 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 19009 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19010 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 19011 for { 19012 _ = v.Args[1] 19013 sh := v.Args[0] 19014 if sh.Op != OpAMD64SHLLconst { 19015 break 19016 } 19017 if sh.AuxInt != 8 { 19018 break 19019 } 19020 x1 := sh.Args[0] 19021 if x1.Op != OpAMD64MOVBloadidx1 { 19022 break 19023 } 19024 i1 := x1.AuxInt 19025 s := x1.Aux 19026 _ = x1.Args[2] 19027 p := x1.Args[0] 19028 idx := x1.Args[1] 19029 mem := x1.Args[2] 19030 x0 := v.Args[1] 19031 if x0.Op != OpAMD64MOVBloadidx1 { 19032 break 19033 } 19034 i0 := x0.AuxInt 19035 if x0.Aux != s { 19036 break 19037 } 19038 _ = x0.Args[2] 19039 if idx != x0.Args[0] { 19040 break 19041 } 19042 if p != x0.Args[1] { 19043 break 19044 } 19045 if mem != x0.Args[2] { 19046 break 19047 } 19048 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19049 break 19050 } 19051 b = mergePoint(b, x0, x1) 19052 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 19053 v.reset(OpCopy) 19054 v.AddArg(v0) 19055 v0.AuxInt = i0 19056 v0.Aux = s 19057 v0.AddArg(p) 19058 v0.AddArg(idx) 19059 v0.AddArg(mem) 19060 return true 19061 } 19062 // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 19063 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19064 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 19065 for { 19066 _ = v.Args[1] 19067 sh := v.Args[0] 19068 if sh.Op != OpAMD64SHLLconst { 19069 break 19070 } 19071 if sh.AuxInt != 8 { 19072 break 19073 } 19074 x1 := sh.Args[0] 19075 if x1.Op != OpAMD64MOVBloadidx1 { 19076 break 19077 } 19078 i1 := x1.AuxInt 19079 s := x1.Aux 19080 _ = x1.Args[2] 19081 idx := x1.Args[0] 19082 p := x1.Args[1] 19083 mem := x1.Args[2] 19084 x0 := v.Args[1] 19085 if x0.Op != OpAMD64MOVBloadidx1 { 19086 break 19087 } 19088 i0 := x0.AuxInt 19089 if x0.Aux != s { 19090 break 19091 } 19092 _ = x0.Args[2] 19093 if idx != x0.Args[0] { 19094 break 19095 } 19096 if p != x0.Args[1] { 19097 break 19098 } 19099 if mem != x0.Args[2] { 19100 break 19101 } 19102 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19103 break 19104 } 19105 b = mergePoint(b, x0, x1) 19106 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 19107 v.reset(OpCopy) 19108 v.AddArg(v0) 19109 v0.AuxInt = i0 19110 v0.Aux = s 19111 v0.AddArg(p) 19112 v0.AddArg(idx) 19113 v0.AddArg(mem) 19114 return true 19115 } 19116 // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 19117 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19118 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19119 for { 19120 _ = v.Args[1] 19121 x0 := v.Args[0] 19122 if x0.Op != OpAMD64MOVWloadidx1 { 19123 break 19124 } 19125 i0 := x0.AuxInt 19126 s := x0.Aux 19127 _ = x0.Args[2] 19128 p := x0.Args[0] 19129 idx := x0.Args[1] 19130 mem := x0.Args[2] 19131 sh := v.Args[1] 19132 if sh.Op != 
OpAMD64SHLLconst { 19133 break 19134 } 19135 if sh.AuxInt != 16 { 19136 break 19137 } 19138 x1 := sh.Args[0] 19139 if x1.Op != OpAMD64MOVWloadidx1 { 19140 break 19141 } 19142 i1 := x1.AuxInt 19143 if x1.Aux != s { 19144 break 19145 } 19146 _ = x1.Args[2] 19147 if p != x1.Args[0] { 19148 break 19149 } 19150 if idx != x1.Args[1] { 19151 break 19152 } 19153 if mem != x1.Args[2] { 19154 break 19155 } 19156 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19157 break 19158 } 19159 b = mergePoint(b, x0, x1) 19160 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19161 v.reset(OpCopy) 19162 v.AddArg(v0) 19163 v0.AuxInt = i0 19164 v0.Aux = s 19165 v0.AddArg(p) 19166 v0.AddArg(idx) 19167 v0.AddArg(mem) 19168 return true 19169 } 19170 // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 19171 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19172 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19173 for { 19174 _ = v.Args[1] 19175 x0 := v.Args[0] 19176 if x0.Op != OpAMD64MOVWloadidx1 { 19177 break 19178 } 19179 i0 := x0.AuxInt 19180 s := x0.Aux 19181 _ = x0.Args[2] 19182 idx := x0.Args[0] 19183 p := x0.Args[1] 19184 mem := x0.Args[2] 19185 sh := v.Args[1] 19186 if sh.Op != OpAMD64SHLLconst { 19187 break 19188 } 19189 if sh.AuxInt != 16 { 19190 break 19191 } 19192 x1 := sh.Args[0] 19193 if x1.Op != OpAMD64MOVWloadidx1 { 19194 break 19195 } 19196 i1 := x1.AuxInt 19197 if x1.Aux != s { 19198 break 19199 } 19200 _ = x1.Args[2] 19201 if p != x1.Args[0] { 19202 break 19203 } 19204 if idx != x1.Args[1] { 19205 break 19206 } 19207 if mem != x1.Args[2] { 19208 break 19209 } 19210 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19211 break 19212 } 19213 b = mergePoint(b, x0, x1) 19214 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19215 v.reset(OpCopy) 19216 v.AddArg(v0) 19217 v0.AuxInt = i0 19218 v0.Aux = s 19219 v0.AddArg(p) 19220 v0.AddArg(idx) 19221 v0.AddArg(mem) 19222 return true 19223 } 19224 // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 19225 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19226 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19227 for { 19228 _ = v.Args[1] 19229 x0 := v.Args[0] 19230 if x0.Op != OpAMD64MOVWloadidx1 { 19231 break 19232 } 19233 i0 := x0.AuxInt 19234 s := x0.Aux 19235 _ = x0.Args[2] 19236 p := x0.Args[0] 19237 idx := x0.Args[1] 19238 mem := x0.Args[2] 19239 sh := v.Args[1] 19240 if sh.Op != OpAMD64SHLLconst { 19241 break 19242 } 19243 if sh.AuxInt != 16 { 19244 break 19245 } 19246 x1 := sh.Args[0] 19247 if x1.Op != OpAMD64MOVWloadidx1 { 19248 break 19249 } 19250 i1 := x1.AuxInt 19251 if x1.Aux != s { 19252 break 19253 } 19254 _ = x1.Args[2] 19255 if idx != x1.Args[0] { 19256 break 19257 } 19258 if p != x1.Args[1] { 19259 break 19260 } 19261 if mem != x1.Args[2] { 19262 break 19263 } 19264 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19265 break 19266 } 19267 b = mergePoint(b, x0, x1) 19268 v0 := b.NewValue0(v.Pos, 
OpAMD64MOVLloadidx1, typ.UInt32) 19269 v.reset(OpCopy) 19270 v.AddArg(v0) 19271 v0.AuxInt = i0 19272 v0.Aux = s 19273 v0.AddArg(p) 19274 v0.AddArg(idx) 19275 v0.AddArg(mem) 19276 return true 19277 } 19278 // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 19279 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19280 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19281 for { 19282 _ = v.Args[1] 19283 x0 := v.Args[0] 19284 if x0.Op != OpAMD64MOVWloadidx1 { 19285 break 19286 } 19287 i0 := x0.AuxInt 19288 s := x0.Aux 19289 _ = x0.Args[2] 19290 idx := x0.Args[0] 19291 p := x0.Args[1] 19292 mem := x0.Args[2] 19293 sh := v.Args[1] 19294 if sh.Op != OpAMD64SHLLconst { 19295 break 19296 } 19297 if sh.AuxInt != 16 { 19298 break 19299 } 19300 x1 := sh.Args[0] 19301 if x1.Op != OpAMD64MOVWloadidx1 { 19302 break 19303 } 19304 i1 := x1.AuxInt 19305 if x1.Aux != s { 19306 break 19307 } 19308 _ = x1.Args[2] 19309 if idx != x1.Args[0] { 19310 break 19311 } 19312 if p != x1.Args[1] { 19313 break 19314 } 19315 if mem != x1.Args[2] { 19316 break 19317 } 19318 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19319 break 19320 } 19321 b = mergePoint(b, x0, x1) 19322 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19323 v.reset(OpCopy) 19324 v.AddArg(v0) 19325 v0.AuxInt = i0 19326 v0.Aux = s 19327 v0.AddArg(p) 19328 v0.AddArg(idx) 19329 v0.AddArg(mem) 19330 return true 19331 } 19332 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 19333 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19334 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19335 for { 19336 _ = v.Args[1] 19337 sh := v.Args[0] 19338 if sh.Op != OpAMD64SHLLconst { 19339 break 19340 } 19341 if sh.AuxInt != 16 { 19342 break 19343 } 19344 x1 := sh.Args[0] 19345 if x1.Op != OpAMD64MOVWloadidx1 { 19346 break 19347 } 19348 i1 := x1.AuxInt 19349 s := x1.Aux 19350 _ = x1.Args[2] 19351 p := x1.Args[0] 19352 idx := x1.Args[1] 19353 mem := x1.Args[2] 19354 x0 := v.Args[1] 19355 if x0.Op != OpAMD64MOVWloadidx1 { 19356 break 19357 } 19358 i0 := x0.AuxInt 19359 if x0.Aux != s { 19360 break 19361 } 19362 _ = x0.Args[2] 19363 if p != x0.Args[0] { 19364 break 19365 } 19366 if idx != x0.Args[1] { 19367 break 19368 } 19369 if mem != x0.Args[2] { 19370 break 19371 } 19372 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19373 break 19374 } 19375 b = mergePoint(b, x0, x1) 19376 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19377 v.reset(OpCopy) 19378 v.AddArg(v0) 19379 v0.AuxInt = i0 19380 v0.Aux = s 19381 v0.AddArg(p) 19382 v0.AddArg(idx) 19383 v0.AddArg(mem) 19384 return true 19385 } 19386 return false 19387 } 19388 func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool { 19389 b := v.Block 19390 _ = b 19391 typ := &b.Func.Config.Types 19392 _ = typ 19393 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 19394 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19395 // result: 
@mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19396 for { 19397 _ = v.Args[1] 19398 sh := v.Args[0] 19399 if sh.Op != OpAMD64SHLLconst { 19400 break 19401 } 19402 if sh.AuxInt != 16 { 19403 break 19404 } 19405 x1 := sh.Args[0] 19406 if x1.Op != OpAMD64MOVWloadidx1 { 19407 break 19408 } 19409 i1 := x1.AuxInt 19410 s := x1.Aux 19411 _ = x1.Args[2] 19412 idx := x1.Args[0] 19413 p := x1.Args[1] 19414 mem := x1.Args[2] 19415 x0 := v.Args[1] 19416 if x0.Op != OpAMD64MOVWloadidx1 { 19417 break 19418 } 19419 i0 := x0.AuxInt 19420 if x0.Aux != s { 19421 break 19422 } 19423 _ = x0.Args[2] 19424 if p != x0.Args[0] { 19425 break 19426 } 19427 if idx != x0.Args[1] { 19428 break 19429 } 19430 if mem != x0.Args[2] { 19431 break 19432 } 19433 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19434 break 19435 } 19436 b = mergePoint(b, x0, x1) 19437 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19438 v.reset(OpCopy) 19439 v.AddArg(v0) 19440 v0.AuxInt = i0 19441 v0.Aux = s 19442 v0.AddArg(p) 19443 v0.AddArg(idx) 19444 v0.AddArg(mem) 19445 return true 19446 } 19447 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 19448 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19449 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19450 for { 19451 _ = v.Args[1] 19452 sh := v.Args[0] 19453 if sh.Op != OpAMD64SHLLconst { 19454 break 19455 } 19456 if sh.AuxInt != 16 { 19457 break 19458 } 19459 x1 := sh.Args[0] 19460 if x1.Op != OpAMD64MOVWloadidx1 { 19461 break 19462 } 19463 i1 := x1.AuxInt 19464 s := x1.Aux 19465 _ = x1.Args[2] 19466 p := x1.Args[0] 19467 idx := x1.Args[1] 19468 mem := x1.Args[2] 19469 x0 := v.Args[1] 19470 if x0.Op != OpAMD64MOVWloadidx1 { 19471 break 19472 } 19473 i0 := x0.AuxInt 19474 if x0.Aux != s { 19475 break 19476 } 19477 _ = x0.Args[2] 19478 if idx != x0.Args[0] { 19479 break 19480 } 19481 if p != x0.Args[1] { 19482 break 19483 } 19484 if mem != x0.Args[2] { 19485 break 19486 } 19487 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19488 break 19489 } 19490 b = mergePoint(b, x0, x1) 19491 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19492 v.reset(OpCopy) 19493 v.AddArg(v0) 19494 v0.AuxInt = i0 19495 v0.Aux = s 19496 v0.AddArg(p) 19497 v0.AddArg(idx) 19498 v0.AddArg(mem) 19499 return true 19500 } 19501 // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 19502 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 19503 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 19504 for { 19505 _ = v.Args[1] 19506 sh := v.Args[0] 19507 if sh.Op != OpAMD64SHLLconst { 19508 break 19509 } 19510 if sh.AuxInt != 16 { 19511 break 19512 } 19513 x1 := sh.Args[0] 19514 if x1.Op != OpAMD64MOVWloadidx1 { 19515 break 19516 } 19517 i1 := x1.AuxInt 19518 s := x1.Aux 19519 _ = x1.Args[2] 19520 idx := x1.Args[0] 19521 p := x1.Args[1] 19522 mem := x1.Args[2] 19523 x0 := v.Args[1] 19524 if x0.Op != OpAMD64MOVWloadidx1 { 19525 break 19526 } 19527 i0 := x0.AuxInt 19528 if x0.Aux != s { 19529 break 19530 } 19531 _ = x0.Args[2] 19532 if idx != x0.Args[0] { 19533 break 19534 } 
19535 if p != x0.Args[1] { 19536 break 19537 } 19538 if mem != x0.Args[2] { 19539 break 19540 } 19541 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 19542 break 19543 } 19544 b = mergePoint(b, x0, x1) 19545 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 19546 v.reset(OpCopy) 19547 v.AddArg(v0) 19548 v0.AuxInt = i0 19549 v0.Aux = s 19550 v0.AddArg(p) 19551 v0.AddArg(idx) 19552 v0.AddArg(mem) 19553 return true 19554 } 19555 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 19556 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19557 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19558 for { 19559 _ = v.Args[1] 19560 s1 := v.Args[0] 19561 if s1.Op != OpAMD64SHLLconst { 19562 break 19563 } 19564 j1 := s1.AuxInt 19565 x1 := s1.Args[0] 19566 if x1.Op != OpAMD64MOVBloadidx1 { 19567 break 19568 } 19569 i1 := x1.AuxInt 19570 s := x1.Aux 19571 _ = x1.Args[2] 19572 p := x1.Args[0] 19573 idx := x1.Args[1] 19574 mem := x1.Args[2] 19575 or := v.Args[1] 19576 if or.Op != OpAMD64ORL { 19577 break 19578 } 19579 _ = or.Args[1] 19580 s0 := or.Args[0] 19581 if s0.Op != OpAMD64SHLLconst { 19582 break 19583 } 19584 j0 := s0.AuxInt 19585 x0 := s0.Args[0] 19586 if x0.Op != OpAMD64MOVBloadidx1 { 19587 break 19588 } 19589 i0 := x0.AuxInt 19590 if x0.Aux != s { 19591 break 19592 } 19593 _ = x0.Args[2] 19594 if p != x0.Args[0] { 19595 break 19596 } 19597 if idx != x0.Args[1] { 19598 break 19599 } 19600 if mem != x0.Args[2] { 19601 break 19602 } 19603 y := or.Args[1] 19604 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19605 break 19606 } 19607 b = mergePoint(b, x0, x1) 19608 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19609 v.reset(OpCopy) 19610 v.AddArg(v0) 19611 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19612 v1.AuxInt = j0 19613 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19614 v2.AuxInt = i0 19615 v2.Aux = s 19616 v2.AddArg(p) 19617 v2.AddArg(idx) 19618 v2.AddArg(mem) 19619 v1.AddArg(v2) 19620 v0.AddArg(v1) 19621 v0.AddArg(y) 19622 return true 19623 } 19624 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 19625 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19626 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19627 for { 19628 _ = v.Args[1] 19629 s1 := v.Args[0] 19630 if s1.Op != OpAMD64SHLLconst { 19631 break 19632 } 19633 j1 := s1.AuxInt 19634 x1 := s1.Args[0] 19635 if x1.Op != OpAMD64MOVBloadidx1 { 19636 break 19637 } 19638 i1 := x1.AuxInt 19639 s := x1.Aux 19640 _ = x1.Args[2] 19641 idx := x1.Args[0] 19642 p := x1.Args[1] 19643 mem := x1.Args[2] 19644 or := v.Args[1] 19645 if or.Op != OpAMD64ORL { 19646 break 19647 } 19648 _ = 
or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
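// Commentary (added; not part of the generated output): the _80/_90/_100/_110
// helpers below continue the ORL rule table. Each rule in this family matches
// two one-byte loads from adjacent offsets (i0 and i0+1) shifted into adjacent
// byte lanes (j0 and j0+8, with j0 aligned to 16 bits) and ORed together, and
// replaces the pair with a single 16-bit load. Because ORL and the p+idx
// address are both commutative, the generator appears to emit one copy of the
// rule per operand ordering, hence the long run of near-identical cases.
//
// Illustrative (hypothetical) source-level pattern, little-endian semantics:
//
//	uint32(b[4])<<16 | uint32(b[5])<<24 | y
//	// i0=4, i1=5, j0=16, j1=24 satisfies i1 == i0+1, j1 == j0+8, j0%16 == 0,
//	// so the two byte loads fuse into (SHLLconst [16] (MOVWloadidx1 [4] ...)).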
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
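	// Commentary (added): the rules below handle the byte-reversed order, where
	// the low-address byte lands in the high lane. Two such byte loads form the
	// big-endian view of a 16-bit word, so they become one little-endian MOVWload
	// followed by ROLWconst [8] (a 16-bit rotate by 8 is exactly a byte swap).
	// For example, uint16(b[i0+1]) | uint16(b[i0])<<8 reads b[i0:i0+2] big-endian.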
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
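// Commentary (added): rewriteValueAMD64_OpAMD64ORL_90 continues the same rule
// table; the generator splits the ORL rules into _N chunks that the dispatcher
// chains together with ||, presumably to keep any single function small.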
func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
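	// Commentary (added): the byte-swap pair rule again, repeated for the
	// indexed (MOVBloadidx1) addressing form, once per p/idx operand ordering.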
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
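// Commentary (added): _100 carries on with the remaining p/idx orderings of
// the indexed byte-swap pair rule.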
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
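	// Commentary (added): the rules below extend the byte-swap idea to 32 bits.
	// Two byte-swapped 16-bit halves at i0 and i0+2, with the low-address half
	// shifted into the high lane, are exactly bswap32 of the 32-bit word at i0,
	// so the whole tree collapses to BSWAPL of a single MOVLloadidx1.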
v1.AddArg(idx) 21577 v1.AddArg(mem) 21578 v0.AddArg(v1) 21579 return true 21580 } 21581 // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 21582 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 21583 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 21584 for { 21585 _ = v.Args[1] 21586 sh := v.Args[0] 21587 if sh.Op != OpAMD64SHLLconst { 21588 break 21589 } 21590 if sh.AuxInt != 8 { 21591 break 21592 } 21593 x0 := sh.Args[0] 21594 if x0.Op != OpAMD64MOVBloadidx1 { 21595 break 21596 } 21597 i0 := x0.AuxInt 21598 s := x0.Aux 21599 _ = x0.Args[2] 21600 idx := x0.Args[0] 21601 p := x0.Args[1] 21602 mem := x0.Args[2] 21603 x1 := v.Args[1] 21604 if x1.Op != OpAMD64MOVBloadidx1 { 21605 break 21606 } 21607 i1 := x1.AuxInt 21608 if x1.Aux != s { 21609 break 21610 } 21611 _ = x1.Args[2] 21612 if idx != x1.Args[0] { 21613 break 21614 } 21615 if p != x1.Args[1] { 21616 break 21617 } 21618 if mem != x1.Args[2] { 21619 break 21620 } 21621 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 21622 break 21623 } 21624 b = mergePoint(b, x0, x1) 21625 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 21626 v.reset(OpCopy) 21627 v.AddArg(v0) 21628 v0.AuxInt = 8 21629 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 21630 v1.AuxInt = i0 21631 v1.Aux = s 21632 v1.AddArg(p) 21633 v1.AddArg(idx) 21634 v1.AddArg(mem) 21635 v0.AddArg(v1) 21636 return true 21637 } 21638 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 21639 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21640 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21641 for { 21642 _ = v.Args[1] 21643 r1 := v.Args[0] 21644 if r1.Op != OpAMD64ROLWconst { 21645 break 21646 } 21647 if r1.AuxInt != 8 { 21648 break 21649 } 21650 x1 := r1.Args[0] 21651 if x1.Op != OpAMD64MOVWloadidx1 { 21652 break 21653 } 21654 i1 := x1.AuxInt 21655 s := x1.Aux 21656 _ = x1.Args[2] 21657 p := x1.Args[0] 21658 idx := x1.Args[1] 21659 mem := x1.Args[2] 21660 sh := v.Args[1] 21661 if sh.Op != OpAMD64SHLLconst { 21662 break 21663 } 21664 if sh.AuxInt != 16 { 21665 break 21666 } 21667 r0 := sh.Args[0] 21668 if r0.Op != OpAMD64ROLWconst { 21669 break 21670 } 21671 if r0.AuxInt != 8 { 21672 break 21673 } 21674 x0 := r0.Args[0] 21675 if x0.Op != OpAMD64MOVWloadidx1 { 21676 break 21677 } 21678 i0 := x0.AuxInt 21679 if x0.Aux != s { 21680 break 21681 } 21682 _ = x0.Args[2] 21683 if p != x0.Args[0] { 21684 break 21685 } 21686 if idx != x0.Args[1] { 21687 break 21688 } 21689 if mem != x0.Args[2] { 21690 break 21691 } 21692 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21693 break 21694 } 21695 b = mergePoint(b, x0, x1) 21696 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21697 v.reset(OpCopy) 21698 v.AddArg(v0) 21699 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21700 v1.AuxInt = i0 21701 v1.Aux = s 21702 v1.AddArg(p) 21703 v1.AddArg(idx) 21704 
v1.AddArg(mem) 21705 v0.AddArg(v1) 21706 return true 21707 } 21708 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 21709 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21710 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21711 for { 21712 _ = v.Args[1] 21713 r1 := v.Args[0] 21714 if r1.Op != OpAMD64ROLWconst { 21715 break 21716 } 21717 if r1.AuxInt != 8 { 21718 break 21719 } 21720 x1 := r1.Args[0] 21721 if x1.Op != OpAMD64MOVWloadidx1 { 21722 break 21723 } 21724 i1 := x1.AuxInt 21725 s := x1.Aux 21726 _ = x1.Args[2] 21727 idx := x1.Args[0] 21728 p := x1.Args[1] 21729 mem := x1.Args[2] 21730 sh := v.Args[1] 21731 if sh.Op != OpAMD64SHLLconst { 21732 break 21733 } 21734 if sh.AuxInt != 16 { 21735 break 21736 } 21737 r0 := sh.Args[0] 21738 if r0.Op != OpAMD64ROLWconst { 21739 break 21740 } 21741 if r0.AuxInt != 8 { 21742 break 21743 } 21744 x0 := r0.Args[0] 21745 if x0.Op != OpAMD64MOVWloadidx1 { 21746 break 21747 } 21748 i0 := x0.AuxInt 21749 if x0.Aux != s { 21750 break 21751 } 21752 _ = x0.Args[2] 21753 if p != x0.Args[0] { 21754 break 21755 } 21756 if idx != x0.Args[1] { 21757 break 21758 } 21759 if mem != x0.Args[2] { 21760 break 21761 } 21762 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21763 break 21764 } 21765 b = mergePoint(b, x0, x1) 21766 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21767 v.reset(OpCopy) 21768 v.AddArg(v0) 21769 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21770 v1.AuxInt = i0 21771 v1.Aux = s 21772 v1.AddArg(p) 21773 v1.AddArg(idx) 21774 v1.AddArg(mem) 21775 v0.AddArg(v1) 21776 return true 21777 } 21778 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 21779 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21780 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21781 for { 21782 _ = v.Args[1] 21783 r1 := v.Args[0] 21784 if r1.Op != OpAMD64ROLWconst { 21785 break 21786 } 21787 if r1.AuxInt != 8 { 21788 break 21789 } 21790 x1 := r1.Args[0] 21791 if x1.Op != OpAMD64MOVWloadidx1 { 21792 break 21793 } 21794 i1 := x1.AuxInt 21795 s := x1.Aux 21796 _ = x1.Args[2] 21797 p := x1.Args[0] 21798 idx := x1.Args[1] 21799 mem := x1.Args[2] 21800 sh := v.Args[1] 21801 if sh.Op != OpAMD64SHLLconst { 21802 break 21803 } 21804 if sh.AuxInt != 16 { 21805 break 21806 } 21807 r0 := sh.Args[0] 21808 if r0.Op != OpAMD64ROLWconst { 21809 break 21810 } 21811 if r0.AuxInt != 8 { 21812 break 21813 } 21814 x0 := r0.Args[0] 21815 if x0.Op != OpAMD64MOVWloadidx1 { 21816 break 21817 } 21818 i0 := x0.AuxInt 21819 if x0.Aux != s { 21820 break 21821 } 21822 _ = x0.Args[2] 21823 if idx != x0.Args[0] { 21824 break 21825 } 21826 if p != x0.Args[1] { 21827 break 21828 } 21829 if mem != x0.Args[2] { 21830 break 21831 } 21832 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21833 break 21834 } 21835 b = mergePoint(b, x0, x1) 21836 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21837 v.reset(OpCopy) 21838 v.AddArg(v0) 21839 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21840 v1.AuxInt = i0 21841 v1.Aux = s 21842 v1.AddArg(p) 21843 v1.AddArg(idx) 21844 v1.AddArg(mem) 21845 v0.AddArg(v1) 21846 return true 21847 } 21848 // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 21849 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21850 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21851 for { 21852 _ = v.Args[1] 21853 r1 := v.Args[0] 21854 if r1.Op != OpAMD64ROLWconst { 21855 break 21856 } 21857 if r1.AuxInt != 8 { 21858 break 21859 } 21860 x1 := r1.Args[0] 21861 if x1.Op != OpAMD64MOVWloadidx1 { 21862 break 21863 } 21864 i1 := x1.AuxInt 21865 s := x1.Aux 21866 _ = x1.Args[2] 21867 idx := x1.Args[0] 21868 p := x1.Args[1] 21869 mem := x1.Args[2] 21870 sh := v.Args[1] 21871 if sh.Op != OpAMD64SHLLconst { 21872 break 21873 } 21874 if sh.AuxInt != 16 { 21875 break 21876 } 21877 r0 := sh.Args[0] 21878 if r0.Op != OpAMD64ROLWconst { 21879 break 21880 } 21881 if r0.AuxInt != 8 { 21882 break 21883 } 21884 x0 := r0.Args[0] 21885 if x0.Op != OpAMD64MOVWloadidx1 { 21886 break 21887 } 21888 i0 := x0.AuxInt 21889 if x0.Aux != s { 21890 break 21891 } 21892 _ = x0.Args[2] 21893 if idx != x0.Args[0] { 21894 break 21895 } 21896 if p != x0.Args[1] { 21897 break 21898 } 21899 if mem != x0.Args[2] { 21900 break 21901 } 21902 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 21903 break 21904 } 21905 b = mergePoint(b, x0, x1) 21906 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 21907 v.reset(OpCopy) 21908 v.AddArg(v0) 21909 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 21910 v1.AuxInt = i0 21911 v1.Aux = s 21912 v1.AddArg(p) 21913 v1.AddArg(idx) 21914 v1.AddArg(mem) 21915 v0.AddArg(v1) 21916 return true 21917 } 21918 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 21919 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21920 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21921 for { 21922 _ = v.Args[1] 21923 sh := v.Args[0] 21924 if sh.Op != OpAMD64SHLLconst { 21925 break 21926 } 21927 if sh.AuxInt != 16 { 21928 break 21929 } 21930 r0 := sh.Args[0] 21931 if r0.Op != OpAMD64ROLWconst { 21932 break 21933 } 21934 if r0.AuxInt != 8 { 21935 break 21936 } 21937 x0 := r0.Args[0] 21938 if x0.Op != OpAMD64MOVWloadidx1 { 21939 break 21940 } 21941 i0 := x0.AuxInt 21942 s := x0.Aux 21943 _ = x0.Args[2] 21944 p := x0.Args[0] 21945 idx := x0.Args[1] 21946 mem := x0.Args[2] 21947 r1 := v.Args[1] 21948 if r1.Op != OpAMD64ROLWconst { 21949 break 21950 } 21951 if r1.AuxInt != 8 { 21952 break 21953 } 21954 x1 := r1.Args[0] 21955 if x1.Op != OpAMD64MOVWloadidx1 { 21956 break 21957 } 
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
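	// The indexed-load rules above all recognize one idea: a 32-bit
	// big-endian value assembled from two byte-reversed 16-bit loads of
	// adjacent halves is really a single byte-reversed 32-bit load. As a
	// rough hand-written illustration (not generator output), source like
	//
	//	x := uint32(binary.BigEndian.Uint16(b))<<16 |
	//		uint32(binary.BigEndian.Uint16(b[2:]))
	//
	// collapses to the equivalent of binary.BigEndian.Uint32(b): one
	// MOVLloadidx1 followed by BSWAPL. The near-duplicate rules differ only
	// in the commutative orderings of the ORL operands and of the (p, idx)
	// address operands, which is why each ordering needs its own match.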
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
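// An observation on the file's layout (an editorial aside, not generator
// output): the rules for each op are split into functions of at most ten
// rules, with the suffix giving the index of the first rule handled — here
// ORL's rules 110 through 119 follow in _120's predecessor _110, and so on.
// The dispatcher chains them with ||, so the chunking presumably exists to
// keep any single rewrite function from growing too large to compile
// efficiently, not to change matching behavior.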
func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
	// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
	// match: (ORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: int32(c)==-1
	// result: (MOVLconst [-1])
	for {
		c := v.AuxInt
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
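// The ORLmem rule above amounts to store-to-load forwarding across register
// files (a reading of the rule itself, not generator output): when the
// 32-bit memory operand was just written from an XMM register by a
// MOVSSstore to the same [off] {sym} ptr address, the fused load is undone
// and replaced by a direct SSE-to-integer move, roughly
//
//	(ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
//	=> (ORL x (MOVLf2i y))
//
// so the value never takes a round trip through memory.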
func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ORQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
	// match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
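	// The remaining rules in this function mirror the ROLQ patterns with the
	// shift directions swapped, recognizing a rotate right. As a rough
	// hand-written illustration (not generator output), for y in [0, 64)
	// Go source along the lines of
	//
	//	r := x>>y | x<<(64-y) // the 64-y is spelled via NEGQ/ANDQconst masking in SSA
	//
	// is what the matched SHRQ/SHLQ/SBBQcarrymask tree computes, and each
	// variant collapses it to a single RORQ x y.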
cond: 24152 // result: (RORQ x y) 24153 for { 24154 _ = v.Args[1] 24155 v_0 := v.Args[0] 24156 if v_0.Op != OpAMD64SHRQ { 24157 break 24158 } 24159 _ = v_0.Args[1] 24160 x := v_0.Args[0] 24161 y := v_0.Args[1] 24162 v_1 := v.Args[1] 24163 if v_1.Op != OpAMD64ANDQ { 24164 break 24165 } 24166 _ = v_1.Args[1] 24167 v_1_0 := v_1.Args[0] 24168 if v_1_0.Op != OpAMD64SHLQ { 24169 break 24170 } 24171 _ = v_1_0.Args[1] 24172 if x != v_1_0.Args[0] { 24173 break 24174 } 24175 v_1_0_1 := v_1_0.Args[1] 24176 if v_1_0_1.Op != OpAMD64NEGQ { 24177 break 24178 } 24179 if y != v_1_0_1.Args[0] { 24180 break 24181 } 24182 v_1_1 := v_1.Args[1] 24183 if v_1_1.Op != OpAMD64SBBQcarrymask { 24184 break 24185 } 24186 v_1_1_0 := v_1_1.Args[0] 24187 if v_1_1_0.Op != OpAMD64CMPQconst { 24188 break 24189 } 24190 if v_1_1_0.AuxInt != 64 { 24191 break 24192 } 24193 v_1_1_0_0 := v_1_1_0.Args[0] 24194 if v_1_1_0_0.Op != OpAMD64NEGQ { 24195 break 24196 } 24197 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 24198 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 24199 break 24200 } 24201 if v_1_1_0_0_0.AuxInt != -64 { 24202 break 24203 } 24204 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 24205 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 24206 break 24207 } 24208 if v_1_1_0_0_0_0.AuxInt != 63 { 24209 break 24210 } 24211 if y != v_1_1_0_0_0_0.Args[0] { 24212 break 24213 } 24214 v.reset(OpAMD64RORQ) 24215 v.AddArg(x) 24216 v.AddArg(y) 24217 return true 24218 } 24219 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y)))) 24220 // cond: 24221 // result: (RORQ x y) 24222 for { 24223 _ = v.Args[1] 24224 v_0 := v.Args[0] 24225 if v_0.Op != OpAMD64SHRQ { 24226 break 24227 } 24228 _ = v_0.Args[1] 24229 x := v_0.Args[0] 24230 y := v_0.Args[1] 24231 v_1 := v.Args[1] 24232 if v_1.Op != OpAMD64ANDQ { 24233 break 24234 } 24235 _ = v_1.Args[1] 24236 v_1_0 := v_1.Args[0] 24237 if v_1_0.Op != OpAMD64SBBQcarrymask { 24238 break 24239 } 24240 v_1_0_0 := v_1_0.Args[0] 24241 if v_1_0_0.Op != OpAMD64CMPQconst { 24242 break 24243 } 24244 if v_1_0_0.AuxInt != 64 { 24245 break 24246 } 24247 v_1_0_0_0 := v_1_0_0.Args[0] 24248 if v_1_0_0_0.Op != OpAMD64NEGQ { 24249 break 24250 } 24251 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 24252 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 24253 break 24254 } 24255 if v_1_0_0_0_0.AuxInt != -64 { 24256 break 24257 } 24258 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 24259 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 24260 break 24261 } 24262 if v_1_0_0_0_0_0.AuxInt != 63 { 24263 break 24264 } 24265 if y != v_1_0_0_0_0_0.Args[0] { 24266 break 24267 } 24268 v_1_1 := v_1.Args[1] 24269 if v_1_1.Op != OpAMD64SHLQ { 24270 break 24271 } 24272 _ = v_1_1.Args[1] 24273 if x != v_1_1.Args[0] { 24274 break 24275 } 24276 v_1_1_1 := v_1_1.Args[1] 24277 if v_1_1_1.Op != OpAMD64NEGQ { 24278 break 24279 } 24280 if y != v_1_1_1.Args[0] { 24281 break 24282 } 24283 v.reset(OpAMD64RORQ) 24284 v.AddArg(x) 24285 v.AddArg(y) 24286 return true 24287 } 24288 // match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y)) 24289 // cond: 24290 // result: (RORQ x y) 24291 for { 24292 _ = v.Args[1] 24293 v_0 := v.Args[0] 24294 if v_0.Op != OpAMD64ANDQ { 24295 break 24296 } 24297 _ = v_0.Args[1] 24298 v_0_0 := v_0.Args[0] 24299 if v_0_0.Op != OpAMD64SHLQ { 24300 break 24301 } 24302 _ = v_0_0.Args[1] 24303 x := v_0_0.Args[0] 24304 v_0_0_1 := v_0_0.Args[1] 24305 if v_0_0_1.Op != OpAMD64NEGQ { 24306 break 24307 } 24308 y := v_0_0_1.Args[0] 24309 v_0_1 := v_0.Args[1] 24310 
if v_0_1.Op != OpAMD64SBBQcarrymask { 24311 break 24312 } 24313 v_0_1_0 := v_0_1.Args[0] 24314 if v_0_1_0.Op != OpAMD64CMPQconst { 24315 break 24316 } 24317 if v_0_1_0.AuxInt != 64 { 24318 break 24319 } 24320 v_0_1_0_0 := v_0_1_0.Args[0] 24321 if v_0_1_0_0.Op != OpAMD64NEGQ { 24322 break 24323 } 24324 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 24325 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 24326 break 24327 } 24328 if v_0_1_0_0_0.AuxInt != -64 { 24329 break 24330 } 24331 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24332 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 24333 break 24334 } 24335 if v_0_1_0_0_0_0.AuxInt != 63 { 24336 break 24337 } 24338 if y != v_0_1_0_0_0_0.Args[0] { 24339 break 24340 } 24341 v_1 := v.Args[1] 24342 if v_1.Op != OpAMD64SHRQ { 24343 break 24344 } 24345 _ = v_1.Args[1] 24346 if x != v_1.Args[0] { 24347 break 24348 } 24349 if y != v_1.Args[1] { 24350 break 24351 } 24352 v.reset(OpAMD64RORQ) 24353 v.AddArg(x) 24354 v.AddArg(y) 24355 return true 24356 } 24357 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y)) 24358 // cond: 24359 // result: (RORQ x y) 24360 for { 24361 _ = v.Args[1] 24362 v_0 := v.Args[0] 24363 if v_0.Op != OpAMD64ANDQ { 24364 break 24365 } 24366 _ = v_0.Args[1] 24367 v_0_0 := v_0.Args[0] 24368 if v_0_0.Op != OpAMD64SBBQcarrymask { 24369 break 24370 } 24371 v_0_0_0 := v_0_0.Args[0] 24372 if v_0_0_0.Op != OpAMD64CMPQconst { 24373 break 24374 } 24375 if v_0_0_0.AuxInt != 64 { 24376 break 24377 } 24378 v_0_0_0_0 := v_0_0_0.Args[0] 24379 if v_0_0_0_0.Op != OpAMD64NEGQ { 24380 break 24381 } 24382 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24383 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 24384 break 24385 } 24386 if v_0_0_0_0_0.AuxInt != -64 { 24387 break 24388 } 24389 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24390 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 24391 break 24392 } 24393 if v_0_0_0_0_0_0.AuxInt != 63 { 24394 break 24395 } 24396 y := v_0_0_0_0_0_0.Args[0] 24397 v_0_1 := v_0.Args[1] 24398 if v_0_1.Op != OpAMD64SHLQ { 24399 break 24400 } 24401 _ = v_0_1.Args[1] 24402 x := v_0_1.Args[0] 24403 v_0_1_1 := v_0_1.Args[1] 24404 if v_0_1_1.Op != OpAMD64NEGQ { 24405 break 24406 } 24407 if y != v_0_1_1.Args[0] { 24408 break 24409 } 24410 v_1 := v.Args[1] 24411 if v_1.Op != OpAMD64SHRQ { 24412 break 24413 } 24414 _ = v_1.Args[1] 24415 if x != v_1.Args[0] { 24416 break 24417 } 24418 if y != v_1.Args[1] { 24419 break 24420 } 24421 v.reset(OpAMD64RORQ) 24422 v.AddArg(x) 24423 v.AddArg(y) 24424 return true 24425 } 24426 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 24427 // cond: 24428 // result: (RORQ x y) 24429 for { 24430 _ = v.Args[1] 24431 v_0 := v.Args[0] 24432 if v_0.Op != OpAMD64SHRQ { 24433 break 24434 } 24435 _ = v_0.Args[1] 24436 x := v_0.Args[0] 24437 y := v_0.Args[1] 24438 v_1 := v.Args[1] 24439 if v_1.Op != OpAMD64ANDQ { 24440 break 24441 } 24442 _ = v_1.Args[1] 24443 v_1_0 := v_1.Args[0] 24444 if v_1_0.Op != OpAMD64SHLQ { 24445 break 24446 } 24447 _ = v_1_0.Args[1] 24448 if x != v_1_0.Args[0] { 24449 break 24450 } 24451 v_1_0_1 := v_1_0.Args[1] 24452 if v_1_0_1.Op != OpAMD64NEGL { 24453 break 24454 } 24455 if y != v_1_0_1.Args[0] { 24456 break 24457 } 24458 v_1_1 := v_1.Args[1] 24459 if v_1_1.Op != OpAMD64SBBQcarrymask { 24460 break 24461 } 24462 v_1_1_0 := v_1_1.Args[0] 24463 if v_1_1_0.Op != OpAMD64CMPLconst { 24464 break 24465 } 24466 if v_1_1_0.AuxInt != 64 { 24467 break 24468 } 24469 v_1_1_0_0 := v_1_1_0.Args[0] 24470 if 
v_1_1_0_0.Op != OpAMD64NEGL { 24471 break 24472 } 24473 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 24474 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 24475 break 24476 } 24477 if v_1_1_0_0_0.AuxInt != -64 { 24478 break 24479 } 24480 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 24481 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 24482 break 24483 } 24484 if v_1_1_0_0_0_0.AuxInt != 63 { 24485 break 24486 } 24487 if y != v_1_1_0_0_0_0.Args[0] { 24488 break 24489 } 24490 v.reset(OpAMD64RORQ) 24491 v.AddArg(x) 24492 v.AddArg(y) 24493 return true 24494 } 24495 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y)))) 24496 // cond: 24497 // result: (RORQ x y) 24498 for { 24499 _ = v.Args[1] 24500 v_0 := v.Args[0] 24501 if v_0.Op != OpAMD64SHRQ { 24502 break 24503 } 24504 _ = v_0.Args[1] 24505 x := v_0.Args[0] 24506 y := v_0.Args[1] 24507 v_1 := v.Args[1] 24508 if v_1.Op != OpAMD64ANDQ { 24509 break 24510 } 24511 _ = v_1.Args[1] 24512 v_1_0 := v_1.Args[0] 24513 if v_1_0.Op != OpAMD64SBBQcarrymask { 24514 break 24515 } 24516 v_1_0_0 := v_1_0.Args[0] 24517 if v_1_0_0.Op != OpAMD64CMPLconst { 24518 break 24519 } 24520 if v_1_0_0.AuxInt != 64 { 24521 break 24522 } 24523 v_1_0_0_0 := v_1_0_0.Args[0] 24524 if v_1_0_0_0.Op != OpAMD64NEGL { 24525 break 24526 } 24527 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 24528 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 24529 break 24530 } 24531 if v_1_0_0_0_0.AuxInt != -64 { 24532 break 24533 } 24534 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 24535 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 24536 break 24537 } 24538 if v_1_0_0_0_0_0.AuxInt != 63 { 24539 break 24540 } 24541 if y != v_1_0_0_0_0_0.Args[0] { 24542 break 24543 } 24544 v_1_1 := v_1.Args[1] 24545 if v_1_1.Op != OpAMD64SHLQ { 24546 break 24547 } 24548 _ = v_1_1.Args[1] 24549 if x != v_1_1.Args[0] { 24550 break 24551 } 24552 v_1_1_1 := v_1_1.Args[1] 24553 if v_1_1_1.Op != OpAMD64NEGL { 24554 break 24555 } 24556 if y != v_1_1_1.Args[0] { 24557 break 24558 } 24559 v.reset(OpAMD64RORQ) 24560 v.AddArg(x) 24561 v.AddArg(y) 24562 return true 24563 } 24564 // match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y)) 24565 // cond: 24566 // result: (RORQ x y) 24567 for { 24568 _ = v.Args[1] 24569 v_0 := v.Args[0] 24570 if v_0.Op != OpAMD64ANDQ { 24571 break 24572 } 24573 _ = v_0.Args[1] 24574 v_0_0 := v_0.Args[0] 24575 if v_0_0.Op != OpAMD64SHLQ { 24576 break 24577 } 24578 _ = v_0_0.Args[1] 24579 x := v_0_0.Args[0] 24580 v_0_0_1 := v_0_0.Args[1] 24581 if v_0_0_1.Op != OpAMD64NEGL { 24582 break 24583 } 24584 y := v_0_0_1.Args[0] 24585 v_0_1 := v_0.Args[1] 24586 if v_0_1.Op != OpAMD64SBBQcarrymask { 24587 break 24588 } 24589 v_0_1_0 := v_0_1.Args[0] 24590 if v_0_1_0.Op != OpAMD64CMPLconst { 24591 break 24592 } 24593 if v_0_1_0.AuxInt != 64 { 24594 break 24595 } 24596 v_0_1_0_0 := v_0_1_0.Args[0] 24597 if v_0_1_0_0.Op != OpAMD64NEGL { 24598 break 24599 } 24600 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 24601 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 24602 break 24603 } 24604 if v_0_1_0_0_0.AuxInt != -64 { 24605 break 24606 } 24607 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24608 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 24609 break 24610 } 24611 if v_0_1_0_0_0_0.AuxInt != 63 { 24612 break 24613 } 24614 if y != v_0_1_0_0_0_0.Args[0] { 24615 break 24616 } 24617 v_1 := v.Args[1] 24618 if v_1.Op != OpAMD64SHRQ { 24619 break 24620 } 24621 _ = v_1.Args[1] 24622 if x != v_1.Args[0] { 24623 break 24624 } 24625 if y != v_1.Args[1] { 24626 break 24627 } 
24628 v.reset(OpAMD64RORQ) 24629 v.AddArg(x) 24630 v.AddArg(y) 24631 return true 24632 } 24633 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y)) 24634 // cond: 24635 // result: (RORQ x y) 24636 for { 24637 _ = v.Args[1] 24638 v_0 := v.Args[0] 24639 if v_0.Op != OpAMD64ANDQ { 24640 break 24641 } 24642 _ = v_0.Args[1] 24643 v_0_0 := v_0.Args[0] 24644 if v_0_0.Op != OpAMD64SBBQcarrymask { 24645 break 24646 } 24647 v_0_0_0 := v_0_0.Args[0] 24648 if v_0_0_0.Op != OpAMD64CMPLconst { 24649 break 24650 } 24651 if v_0_0_0.AuxInt != 64 { 24652 break 24653 } 24654 v_0_0_0_0 := v_0_0_0.Args[0] 24655 if v_0_0_0_0.Op != OpAMD64NEGL { 24656 break 24657 } 24658 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24659 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 24660 break 24661 } 24662 if v_0_0_0_0_0.AuxInt != -64 { 24663 break 24664 } 24665 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24666 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 24667 break 24668 } 24669 if v_0_0_0_0_0_0.AuxInt != 63 { 24670 break 24671 } 24672 y := v_0_0_0_0_0_0.Args[0] 24673 v_0_1 := v_0.Args[1] 24674 if v_0_1.Op != OpAMD64SHLQ { 24675 break 24676 } 24677 _ = v_0_1.Args[1] 24678 x := v_0_1.Args[0] 24679 v_0_1_1 := v_0_1.Args[1] 24680 if v_0_1_1.Op != OpAMD64NEGL { 24681 break 24682 } 24683 if y != v_0_1_1.Args[0] { 24684 break 24685 } 24686 v_1 := v.Args[1] 24687 if v_1.Op != OpAMD64SHRQ { 24688 break 24689 } 24690 _ = v_1.Args[1] 24691 if x != v_1.Args[0] { 24692 break 24693 } 24694 if y != v_1.Args[1] { 24695 break 24696 } 24697 v.reset(OpAMD64RORQ) 24698 v.AddArg(x) 24699 v.AddArg(y) 24700 return true 24701 } 24702 return false 24703 } 24704 func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { 24705 b := v.Block 24706 _ = b 24707 typ := &b.Func.Config.Types 24708 _ = typ 24709 // match: (ORQ x x) 24710 // cond: 24711 // result: x 24712 for { 24713 _ = v.Args[1] 24714 x := v.Args[0] 24715 if x != v.Args[1] { 24716 break 24717 } 24718 v.reset(OpCopy) 24719 v.Type = x.Type 24720 v.AddArg(x) 24721 return true 24722 } 24723 // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) 24724 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24725 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 24726 for { 24727 _ = v.Args[1] 24728 x0 := v.Args[0] 24729 if x0.Op != OpAMD64MOVBload { 24730 break 24731 } 24732 i0 := x0.AuxInt 24733 s := x0.Aux 24734 _ = x0.Args[1] 24735 p := x0.Args[0] 24736 mem := x0.Args[1] 24737 sh := v.Args[1] 24738 if sh.Op != OpAMD64SHLQconst { 24739 break 24740 } 24741 if sh.AuxInt != 8 { 24742 break 24743 } 24744 x1 := sh.Args[0] 24745 if x1.Op != OpAMD64MOVBload { 24746 break 24747 } 24748 i1 := x1.AuxInt 24749 if x1.Aux != s { 24750 break 24751 } 24752 _ = x1.Args[1] 24753 if p != x1.Args[0] { 24754 break 24755 } 24756 if mem != x1.Args[1] { 24757 break 24758 } 24759 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24760 break 24761 } 24762 b = mergePoint(b, x0, x1) 24763 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 24764 v.reset(OpCopy) 24765 v.AddArg(v0) 24766 v0.AuxInt = i0 24767 v0.Aux = s 24768 v0.AddArg(p) 24769 v0.AddArg(mem) 24770 return true 24771 } 24772 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 24773 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 
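// The ROLQ/RORQ rules above all recognize the generic lowering of a rotate
// written with shifts and a carry mask. Roughly (a sketch of the source-level
// idiom they fold away, assuming x is a uint64 and y an unsigned count; this
// is illustrative and not part of the generated matcher):
//
//	x<<(y&63) | x>>((64-y)&63) // folds to a single ROLQ x, y
//
// The SBBQcarrymask(CMPconst ...) subtree is the mask that zeroes the
// out-of-range half of the double shift; once the whole shape is matched it
// can be replaced by one rotate instruction. The NEGL/CMPLconst variants
// cover the same idiom spelled with a 32-bit shift count.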
func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
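// The load-combining rules in ORQ_20 and the following functions merge
// adjacent narrow loads into one wider load on this little-endian target:
// (ORQ (MOVBload [i0]) (SHLQconst [8] (MOVBload [i0+1]))) is exactly a 16-bit
// load at offset i0, and likewise for the 16-to-32 and 32-to-64 bit pairs.
// The cond clauses (Uses == 1, mergePoint, clobber) check that the narrow
// loads have no other users and that a block exists where the merged load can
// be placed. A sketch of the source pattern this family catches (assuming b
// is a []byte with at least two elements; illustrative only):
//
//	u := uint16(b[0]) | uint16(b[1])<<8 // becomes a single MOVWload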
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
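// ORQ is commutative, and the loadidx1 ops accept their pointer and index
// operands in either order, so the generator emits one rule per operand
// permutation of the same underlying match: 2 (ORQ operand order) x 2 (p/idx
// order in x0) x 2 (p/idx order in x1) = 8 textually distinct rules per
// merge, split across ORQ_30 through ORQ_50. A sketch of the indexed source
// pattern they all normalize (assuming b is a []byte and i an in-range int;
// illustrative only):
//
//	u := uint16(b[i]) | uint16(b[i+1])<<8 // one MOVWloadidx1 when the address is p+idx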
func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
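// ORQ_50 continues the same family at the next width: pairs of 32-bit indexed
// loads (MOVLloadidx1) combined through SHLQconst [32] become a single
// MOVQloadidx1. A sketch of the source pattern (assuming b is a []byte of at
// least 8 bytes and using encoding/binary for the 32-bit halves; illustrative
// only):
//
//	u := uint64(binary.LittleEndian.Uint32(b)) |
//		uint64(binary.LittleEndian.Uint32(b[4:]))<<32 // one 64-bit load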
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1
[i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) 26685 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26686 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26687 for { 26688 _ = v.Args[1] 26689 sh := v.Args[0] 26690 if sh.Op != OpAMD64SHLQconst { 26691 break 26692 } 26693 if sh.AuxInt != 32 { 26694 break 26695 } 26696 x1 := sh.Args[0] 26697 if x1.Op != OpAMD64MOVLloadidx1 { 26698 break 26699 } 26700 i1 := x1.AuxInt 26701 s := x1.Aux 26702 _ = x1.Args[2] 26703 idx := x1.Args[0] 26704 p := x1.Args[1] 26705 mem := x1.Args[2] 26706 x0 := v.Args[1] 26707 if x0.Op != OpAMD64MOVLloadidx1 { 26708 break 26709 } 26710 i0 := x0.AuxInt 26711 if x0.Aux != s { 26712 break 26713 } 26714 _ = x0.Args[2] 26715 if p != x0.Args[0] { 26716 break 26717 } 26718 if idx != x0.Args[1] { 26719 break 26720 } 26721 if mem != x0.Args[2] { 26722 break 26723 } 26724 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26725 break 26726 } 26727 b = mergePoint(b, x0, x1) 26728 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26729 v.reset(OpCopy) 26730 v.AddArg(v0) 26731 v0.AuxInt = i0 26732 v0.Aux = s 26733 v0.AddArg(p) 26734 v0.AddArg(idx) 26735 v0.AddArg(mem) 26736 return true 26737 } 26738 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 26739 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26740 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26741 for { 26742 _ = v.Args[1] 26743 sh := v.Args[0] 26744 if sh.Op != OpAMD64SHLQconst { 26745 break 26746 } 26747 if sh.AuxInt != 32 { 26748 break 26749 } 26750 x1 := sh.Args[0] 26751 if x1.Op != OpAMD64MOVLloadidx1 { 26752 break 26753 } 26754 i1 := x1.AuxInt 26755 s := x1.Aux 26756 _ = x1.Args[2] 26757 p := x1.Args[0] 26758 idx := x1.Args[1] 26759 mem := x1.Args[2] 26760 x0 := v.Args[1] 26761 if x0.Op != OpAMD64MOVLloadidx1 { 26762 break 26763 } 26764 i0 := x0.AuxInt 26765 if x0.Aux != s { 26766 break 26767 } 26768 _ = x0.Args[2] 26769 if idx != x0.Args[0] { 26770 break 26771 } 26772 if p != x0.Args[1] { 26773 break 26774 } 26775 if mem != x0.Args[2] { 26776 break 26777 } 26778 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26779 break 26780 } 26781 b = mergePoint(b, x0, x1) 26782 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26783 v.reset(OpCopy) 26784 v.AddArg(v0) 26785 v0.AuxInt = i0 26786 v0.Aux = s 26787 v0.AddArg(p) 26788 v0.AddArg(idx) 26789 v0.AddArg(mem) 26790 return true 26791 } 26792 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) 26793 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26794 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26795 for { 26796 _ = v.Args[1] 26797 sh := v.Args[0] 26798 if sh.Op != OpAMD64SHLQconst { 26799 break 26800 } 26801 if sh.AuxInt != 32 { 26802 break 26803 } 26804 x1 := sh.Args[0] 26805 if x1.Op != OpAMD64MOVLloadidx1 { 26806 break 26807 } 26808 i1 := x1.AuxInt 26809 s := x1.Aux 26810 _ = x1.Args[2] 26811 idx := x1.Args[0] 26812 p := x1.Args[1] 26813 mem := 
x1.Args[2] 26814 x0 := v.Args[1] 26815 if x0.Op != OpAMD64MOVLloadidx1 { 26816 break 26817 } 26818 i0 := x0.AuxInt 26819 if x0.Aux != s { 26820 break 26821 } 26822 _ = x0.Args[2] 26823 if idx != x0.Args[0] { 26824 break 26825 } 26826 if p != x0.Args[1] { 26827 break 26828 } 26829 if mem != x0.Args[2] { 26830 break 26831 } 26832 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26833 break 26834 } 26835 b = mergePoint(b, x0, x1) 26836 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 26837 v.reset(OpCopy) 26838 v.AddArg(v0) 26839 v0.AuxInt = i0 26840 v0.Aux = s 26841 v0.AddArg(p) 26842 v0.AddArg(idx) 26843 v0.AddArg(mem) 26844 return true 26845 } 26846 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 26847 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26848 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26849 for { 26850 _ = v.Args[1] 26851 s1 := v.Args[0] 26852 if s1.Op != OpAMD64SHLQconst { 26853 break 26854 } 26855 j1 := s1.AuxInt 26856 x1 := s1.Args[0] 26857 if x1.Op != OpAMD64MOVBloadidx1 { 26858 break 26859 } 26860 i1 := x1.AuxInt 26861 s := x1.Aux 26862 _ = x1.Args[2] 26863 p := x1.Args[0] 26864 idx := x1.Args[1] 26865 mem := x1.Args[2] 26866 or := v.Args[1] 26867 if or.Op != OpAMD64ORQ { 26868 break 26869 } 26870 _ = or.Args[1] 26871 s0 := or.Args[0] 26872 if s0.Op != OpAMD64SHLQconst { 26873 break 26874 } 26875 j0 := s0.AuxInt 26876 x0 := s0.Args[0] 26877 if x0.Op != OpAMD64MOVBloadidx1 { 26878 break 26879 } 26880 i0 := x0.AuxInt 26881 if x0.Aux != s { 26882 break 26883 } 26884 _ = x0.Args[2] 26885 if p != x0.Args[0] { 26886 break 26887 } 26888 if idx != x0.Args[1] { 26889 break 26890 } 26891 if mem != x0.Args[2] { 26892 break 26893 } 26894 y := or.Args[1] 26895 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26896 break 26897 } 26898 b = mergePoint(b, x0, x1) 26899 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26900 v.reset(OpCopy) 26901 v.AddArg(v0) 26902 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26903 v1.AuxInt = j0 26904 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26905 v2.AuxInt = i0 26906 v2.Aux = s 26907 v2.AddArg(p) 26908 v2.AddArg(idx) 26909 v2.AddArg(mem) 26910 v1.AddArg(v2) 26911 v0.AddArg(v1) 26912 v0.AddArg(y) 26913 return true 26914 } 26915 return false 26916 } 26917 func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool { 26918 b := v.Block 26919 _ = b 26920 typ := &b.Func.Config.Types 26921 _ = typ 26922 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) 26923 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26924 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26925 for { 26926 _ = v.Args[1] 26927 
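// The byte-load permutations in this function all implement the same
// little-endian combine: two adjacent byte loads, shifted left by j0 and
// j0+8 and ORed into a wider chain, are folded into a single 16-bit load
// shifted by j0; the p/idx argument orders vary only because the two
// addends of an idx1 address are interchangeable. A hand-written sketch
// of the shape being matched (not generated code; b, i and y are
// hypothetical, le abbreviates encoding/binary's LittleEndian):
//
//	v := y | uint64(b[i])<<16 | uint64(b[i+1])<<24 // j0 = 16, j1 = 24
//
// is equivalent on AMD64 to
//
//	v := y | uint64(le.Uint16(b[i:]))<<16
//
// which is what the SHLQconst-of-MOVWloadidx1 result below constructs.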
s1 := v.Args[0] 26928 if s1.Op != OpAMD64SHLQconst { 26929 break 26930 } 26931 j1 := s1.AuxInt 26932 x1 := s1.Args[0] 26933 if x1.Op != OpAMD64MOVBloadidx1 { 26934 break 26935 } 26936 i1 := x1.AuxInt 26937 s := x1.Aux 26938 _ = x1.Args[2] 26939 idx := x1.Args[0] 26940 p := x1.Args[1] 26941 mem := x1.Args[2] 26942 or := v.Args[1] 26943 if or.Op != OpAMD64ORQ { 26944 break 26945 } 26946 _ = or.Args[1] 26947 s0 := or.Args[0] 26948 if s0.Op != OpAMD64SHLQconst { 26949 break 26950 } 26951 j0 := s0.AuxInt 26952 x0 := s0.Args[0] 26953 if x0.Op != OpAMD64MOVBloadidx1 { 26954 break 26955 } 26956 i0 := x0.AuxInt 26957 if x0.Aux != s { 26958 break 26959 } 26960 _ = x0.Args[2] 26961 if p != x0.Args[0] { 26962 break 26963 } 26964 if idx != x0.Args[1] { 26965 break 26966 } 26967 if mem != x0.Args[2] { 26968 break 26969 } 26970 y := or.Args[1] 26971 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 26972 break 26973 } 26974 b = mergePoint(b, x0, x1) 26975 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 26976 v.reset(OpCopy) 26977 v.AddArg(v0) 26978 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 26979 v1.AuxInt = j0 26980 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 26981 v2.AuxInt = i0 26982 v2.Aux = s 26983 v2.AddArg(p) 26984 v2.AddArg(idx) 26985 v2.AddArg(mem) 26986 v1.AddArg(v2) 26987 v0.AddArg(v1) 26988 v0.AddArg(y) 26989 return true 26990 } 26991 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 26992 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 26993 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 26994 for { 26995 _ = v.Args[1] 26996 s1 := v.Args[0] 26997 if s1.Op != OpAMD64SHLQconst { 26998 break 26999 } 27000 j1 := s1.AuxInt 27001 x1 := s1.Args[0] 27002 if x1.Op != OpAMD64MOVBloadidx1 { 27003 break 27004 } 27005 i1 := x1.AuxInt 27006 s := x1.Aux 27007 _ = x1.Args[2] 27008 p := x1.Args[0] 27009 idx := x1.Args[1] 27010 mem := x1.Args[2] 27011 or := v.Args[1] 27012 if or.Op != OpAMD64ORQ { 27013 break 27014 } 27015 _ = or.Args[1] 27016 s0 := or.Args[0] 27017 if s0.Op != OpAMD64SHLQconst { 27018 break 27019 } 27020 j0 := s0.AuxInt 27021 x0 := s0.Args[0] 27022 if x0.Op != OpAMD64MOVBloadidx1 { 27023 break 27024 } 27025 i0 := x0.AuxInt 27026 if x0.Aux != s { 27027 break 27028 } 27029 _ = x0.Args[2] 27030 if idx != x0.Args[0] { 27031 break 27032 } 27033 if p != x0.Args[1] { 27034 break 27035 } 27036 if mem != x0.Args[2] { 27037 break 27038 } 27039 y := or.Args[1] 27040 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27041 break 27042 } 27043 b = mergePoint(b, x0, x1) 27044 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27045 v.reset(OpCopy) 27046 v.AddArg(v0) 27047 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27048 v1.AuxInt = j0 27049 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27050 v2.AuxInt = i0 27051 v2.Aux = s 27052 v2.AddArg(p) 27053 v2.AddArg(idx) 27054 
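// The merged 16-bit load reuses the original operands: base pointer,
// index, then the memory state.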
v2.AddArg(mem) 27055 v1.AddArg(v2) 27056 v0.AddArg(v1) 27057 v0.AddArg(y) 27058 return true 27059 } 27060 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) 27061 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27062 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27063 for { 27064 _ = v.Args[1] 27065 s1 := v.Args[0] 27066 if s1.Op != OpAMD64SHLQconst { 27067 break 27068 } 27069 j1 := s1.AuxInt 27070 x1 := s1.Args[0] 27071 if x1.Op != OpAMD64MOVBloadidx1 { 27072 break 27073 } 27074 i1 := x1.AuxInt 27075 s := x1.Aux 27076 _ = x1.Args[2] 27077 idx := x1.Args[0] 27078 p := x1.Args[1] 27079 mem := x1.Args[2] 27080 or := v.Args[1] 27081 if or.Op != OpAMD64ORQ { 27082 break 27083 } 27084 _ = or.Args[1] 27085 s0 := or.Args[0] 27086 if s0.Op != OpAMD64SHLQconst { 27087 break 27088 } 27089 j0 := s0.AuxInt 27090 x0 := s0.Args[0] 27091 if x0.Op != OpAMD64MOVBloadidx1 { 27092 break 27093 } 27094 i0 := x0.AuxInt 27095 if x0.Aux != s { 27096 break 27097 } 27098 _ = x0.Args[2] 27099 if idx != x0.Args[0] { 27100 break 27101 } 27102 if p != x0.Args[1] { 27103 break 27104 } 27105 if mem != x0.Args[2] { 27106 break 27107 } 27108 y := or.Args[1] 27109 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27110 break 27111 } 27112 b = mergePoint(b, x0, x1) 27113 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27114 v.reset(OpCopy) 27115 v.AddArg(v0) 27116 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27117 v1.AuxInt = j0 27118 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27119 v2.AuxInt = i0 27120 v2.Aux = s 27121 v2.AddArg(p) 27122 v2.AddArg(idx) 27123 v2.AddArg(mem) 27124 v1.AddArg(v2) 27125 v0.AddArg(v1) 27126 v0.AddArg(y) 27127 return true 27128 } 27129 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 27130 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27131 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27132 for { 27133 _ = v.Args[1] 27134 s1 := v.Args[0] 27135 if s1.Op != OpAMD64SHLQconst { 27136 break 27137 } 27138 j1 := s1.AuxInt 27139 x1 := s1.Args[0] 27140 if x1.Op != OpAMD64MOVBloadidx1 { 27141 break 27142 } 27143 i1 := x1.AuxInt 27144 s := x1.Aux 27145 _ = x1.Args[2] 27146 p := x1.Args[0] 27147 idx := x1.Args[1] 27148 mem := x1.Args[2] 27149 or := v.Args[1] 27150 if or.Op != OpAMD64ORQ { 27151 break 27152 } 27153 _ = or.Args[1] 27154 y := or.Args[0] 27155 s0 := or.Args[1] 27156 if s0.Op != OpAMD64SHLQconst { 27157 break 27158 } 27159 j0 := s0.AuxInt 27160 x0 := s0.Args[0] 27161 if x0.Op != OpAMD64MOVBloadidx1 { 27162 break 27163 } 27164 i0 := x0.AuxInt 27165 if x0.Aux != s { 27166 break 27167 } 27168 _ = x0.Args[2] 27169 if p != x0.Args[0] { 27170 break 27171 } 27172 if idx != x0.Args[1] { 27173 break 27174 } 27175 if mem != x0.Args[2] { 
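// Both loads must take the same memory argument; if a store could
// intervene between them, folding the pair into one wider load would
// be unsound.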
27176 break 27177 } 27178 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27179 break 27180 } 27181 b = mergePoint(b, x0, x1) 27182 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27183 v.reset(OpCopy) 27184 v.AddArg(v0) 27185 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27186 v1.AuxInt = j0 27187 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27188 v2.AuxInt = i0 27189 v2.Aux = s 27190 v2.AddArg(p) 27191 v2.AddArg(idx) 27192 v2.AddArg(mem) 27193 v1.AddArg(v2) 27194 v0.AddArg(v1) 27195 v0.AddArg(y) 27196 return true 27197 } 27198 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 27199 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27200 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27201 for { 27202 _ = v.Args[1] 27203 s1 := v.Args[0] 27204 if s1.Op != OpAMD64SHLQconst { 27205 break 27206 } 27207 j1 := s1.AuxInt 27208 x1 := s1.Args[0] 27209 if x1.Op != OpAMD64MOVBloadidx1 { 27210 break 27211 } 27212 i1 := x1.AuxInt 27213 s := x1.Aux 27214 _ = x1.Args[2] 27215 idx := x1.Args[0] 27216 p := x1.Args[1] 27217 mem := x1.Args[2] 27218 or := v.Args[1] 27219 if or.Op != OpAMD64ORQ { 27220 break 27221 } 27222 _ = or.Args[1] 27223 y := or.Args[0] 27224 s0 := or.Args[1] 27225 if s0.Op != OpAMD64SHLQconst { 27226 break 27227 } 27228 j0 := s0.AuxInt 27229 x0 := s0.Args[0] 27230 if x0.Op != OpAMD64MOVBloadidx1 { 27231 break 27232 } 27233 i0 := x0.AuxInt 27234 if x0.Aux != s { 27235 break 27236 } 27237 _ = x0.Args[2] 27238 if p != x0.Args[0] { 27239 break 27240 } 27241 if idx != x0.Args[1] { 27242 break 27243 } 27244 if mem != x0.Args[2] { 27245 break 27246 } 27247 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27248 break 27249 } 27250 b = mergePoint(b, x0, x1) 27251 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27252 v.reset(OpCopy) 27253 v.AddArg(v0) 27254 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27255 v1.AuxInt = j0 27256 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27257 v2.AuxInt = i0 27258 v2.Aux = s 27259 v2.AddArg(p) 27260 v2.AddArg(idx) 27261 v2.AddArg(mem) 27262 v1.AddArg(v2) 27263 v0.AddArg(v1) 27264 v0.AddArg(y) 27265 return true 27266 } 27267 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 27268 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27269 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27270 for { 27271 _ = v.Args[1] 27272 s1 := v.Args[0] 27273 if s1.Op != OpAMD64SHLQconst { 27274 break 27275 } 27276 j1 := s1.AuxInt 27277 x1 := s1.Args[0] 27278 if x1.Op != OpAMD64MOVBloadidx1 { 27279 break 27280 } 27281 i1 := 
x1.AuxInt 27282 s := x1.Aux 27283 _ = x1.Args[2] 27284 p := x1.Args[0] 27285 idx := x1.Args[1] 27286 mem := x1.Args[2] 27287 or := v.Args[1] 27288 if or.Op != OpAMD64ORQ { 27289 break 27290 } 27291 _ = or.Args[1] 27292 y := or.Args[0] 27293 s0 := or.Args[1] 27294 if s0.Op != OpAMD64SHLQconst { 27295 break 27296 } 27297 j0 := s0.AuxInt 27298 x0 := s0.Args[0] 27299 if x0.Op != OpAMD64MOVBloadidx1 { 27300 break 27301 } 27302 i0 := x0.AuxInt 27303 if x0.Aux != s { 27304 break 27305 } 27306 _ = x0.Args[2] 27307 if idx != x0.Args[0] { 27308 break 27309 } 27310 if p != x0.Args[1] { 27311 break 27312 } 27313 if mem != x0.Args[2] { 27314 break 27315 } 27316 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27317 break 27318 } 27319 b = mergePoint(b, x0, x1) 27320 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27321 v.reset(OpCopy) 27322 v.AddArg(v0) 27323 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27324 v1.AuxInt = j0 27325 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27326 v2.AuxInt = i0 27327 v2.Aux = s 27328 v2.AddArg(p) 27329 v2.AddArg(idx) 27330 v2.AddArg(mem) 27331 v1.AddArg(v2) 27332 v0.AddArg(v1) 27333 v0.AddArg(y) 27334 return true 27335 } 27336 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 27337 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27338 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27339 for { 27340 _ = v.Args[1] 27341 s1 := v.Args[0] 27342 if s1.Op != OpAMD64SHLQconst { 27343 break 27344 } 27345 j1 := s1.AuxInt 27346 x1 := s1.Args[0] 27347 if x1.Op != OpAMD64MOVBloadidx1 { 27348 break 27349 } 27350 i1 := x1.AuxInt 27351 s := x1.Aux 27352 _ = x1.Args[2] 27353 idx := x1.Args[0] 27354 p := x1.Args[1] 27355 mem := x1.Args[2] 27356 or := v.Args[1] 27357 if or.Op != OpAMD64ORQ { 27358 break 27359 } 27360 _ = or.Args[1] 27361 y := or.Args[0] 27362 s0 := or.Args[1] 27363 if s0.Op != OpAMD64SHLQconst { 27364 break 27365 } 27366 j0 := s0.AuxInt 27367 x0 := s0.Args[0] 27368 if x0.Op != OpAMD64MOVBloadidx1 { 27369 break 27370 } 27371 i0 := x0.AuxInt 27372 if x0.Aux != s { 27373 break 27374 } 27375 _ = x0.Args[2] 27376 if idx != x0.Args[0] { 27377 break 27378 } 27379 if p != x0.Args[1] { 27380 break 27381 } 27382 if mem != x0.Args[2] { 27383 break 27384 } 27385 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27386 break 27387 } 27388 b = mergePoint(b, x0, x1) 27389 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27390 v.reset(OpCopy) 27391 v.AddArg(v0) 27392 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27393 v1.AuxInt = j0 27394 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27395 v2.AuxInt = i0 27396 v2.Aux = s 27397 v2.AddArg(p) 27398 v2.AddArg(idx) 27399 v2.AddArg(mem) 27400 v1.AddArg(v2) 27401 v0.AddArg(v1) 27402 v0.AddArg(y) 27403 return true 27404 } 27405 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) 
s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27406 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27407 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27408 for { 27409 _ = v.Args[1] 27410 or := v.Args[0] 27411 if or.Op != OpAMD64ORQ { 27412 break 27413 } 27414 _ = or.Args[1] 27415 s0 := or.Args[0] 27416 if s0.Op != OpAMD64SHLQconst { 27417 break 27418 } 27419 j0 := s0.AuxInt 27420 x0 := s0.Args[0] 27421 if x0.Op != OpAMD64MOVBloadidx1 { 27422 break 27423 } 27424 i0 := x0.AuxInt 27425 s := x0.Aux 27426 _ = x0.Args[2] 27427 p := x0.Args[0] 27428 idx := x0.Args[1] 27429 mem := x0.Args[2] 27430 y := or.Args[1] 27431 s1 := v.Args[1] 27432 if s1.Op != OpAMD64SHLQconst { 27433 break 27434 } 27435 j1 := s1.AuxInt 27436 x1 := s1.Args[0] 27437 if x1.Op != OpAMD64MOVBloadidx1 { 27438 break 27439 } 27440 i1 := x1.AuxInt 27441 if x1.Aux != s { 27442 break 27443 } 27444 _ = x1.Args[2] 27445 if p != x1.Args[0] { 27446 break 27447 } 27448 if idx != x1.Args[1] { 27449 break 27450 } 27451 if mem != x1.Args[2] { 27452 break 27453 } 27454 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27455 break 27456 } 27457 b = mergePoint(b, x0, x1) 27458 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27459 v.reset(OpCopy) 27460 v.AddArg(v0) 27461 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27462 v1.AuxInt = j0 27463 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27464 v2.AuxInt = i0 27465 v2.Aux = s 27466 v2.AddArg(p) 27467 v2.AddArg(idx) 27468 v2.AddArg(mem) 27469 v1.AddArg(v2) 27470 v0.AddArg(v1) 27471 v0.AddArg(y) 27472 return true 27473 } 27474 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27475 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27476 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27477 for { 27478 _ = v.Args[1] 27479 or := v.Args[0] 27480 if or.Op != OpAMD64ORQ { 27481 break 27482 } 27483 _ = or.Args[1] 27484 s0 := or.Args[0] 27485 if s0.Op != OpAMD64SHLQconst { 27486 break 27487 } 27488 j0 := s0.AuxInt 27489 x0 := s0.Args[0] 27490 if x0.Op != OpAMD64MOVBloadidx1 { 27491 break 27492 } 27493 i0 := x0.AuxInt 27494 s := x0.Aux 27495 _ = x0.Args[2] 27496 idx := x0.Args[0] 27497 p := x0.Args[1] 27498 mem := x0.Args[2] 27499 y := or.Args[1] 27500 s1 := v.Args[1] 27501 if s1.Op != OpAMD64SHLQconst { 27502 break 27503 } 27504 j1 := s1.AuxInt 27505 x1 := s1.Args[0] 27506 if x1.Op != OpAMD64MOVBloadidx1 { 27507 break 27508 } 27509 i1 := x1.AuxInt 27510 if x1.Aux != s { 27511 break 27512 } 27513 _ = x1.Args[2] 27514 if p != x1.Args[0] { 27515 break 27516 } 27517 if idx != x1.Args[1] { 27518 break 27519 } 27520 if mem != x1.Args[2] { 27521 break 27522 } 27523 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27524 break 27525 } 27526 b = mergePoint(b, x0, x1) 27527 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27528 v.reset(OpCopy) 27529 v.AddArg(v0) 27530 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27531 v1.AuxInt = j0 27532 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27533 v2.AuxInt = i0 27534 v2.Aux = s 27535 v2.AddArg(p) 27536 v2.AddArg(idx) 27537 v2.AddArg(mem) 27538 v1.AddArg(v2) 27539 v0.AddArg(v1) 27540 v0.AddArg(y) 27541 return true 27542 } 27543 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27544 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27545 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27546 for { 27547 _ = v.Args[1] 27548 or := v.Args[0] 27549 if or.Op != OpAMD64ORQ { 27550 break 27551 } 27552 _ = or.Args[1] 27553 y := or.Args[0] 27554 s0 := or.Args[1] 27555 if s0.Op != OpAMD64SHLQconst { 27556 break 27557 } 27558 j0 := s0.AuxInt 27559 x0 := s0.Args[0] 27560 if x0.Op != OpAMD64MOVBloadidx1 { 27561 break 27562 } 27563 i0 := x0.AuxInt 27564 s := x0.Aux 27565 _ = x0.Args[2] 27566 p := x0.Args[0] 27567 idx := x0.Args[1] 27568 mem := x0.Args[2] 27569 s1 := v.Args[1] 27570 if s1.Op != OpAMD64SHLQconst { 27571 break 27572 } 27573 j1 := s1.AuxInt 27574 x1 := s1.Args[0] 27575 if x1.Op != OpAMD64MOVBloadidx1 { 27576 break 27577 } 27578 i1 := x1.AuxInt 27579 if x1.Aux != s { 27580 break 27581 } 27582 _ = x1.Args[2] 27583 if p != x1.Args[0] { 27584 break 27585 } 27586 if idx != x1.Args[1] { 27587 break 27588 } 27589 if mem != x1.Args[2] { 27590 break 27591 } 27592 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27593 break 27594 } 27595 b = mergePoint(b, x0, x1) 27596 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27597 v.reset(OpCopy) 27598 v.AddArg(v0) 27599 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27600 v1.AuxInt = j0 27601 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27602 v2.AuxInt = i0 27603 v2.Aux = s 27604 v2.AddArg(p) 27605 v2.AddArg(idx) 27606 v2.AddArg(mem) 27607 v1.AddArg(v2) 27608 v0.AddArg(v1) 27609 v0.AddArg(y) 27610 return true 27611 } 27612 return false 27613 } 27614 func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool { 27615 b := v.Block 27616 _ = b 27617 typ := &b.Func.Config.Types 27618 _ = typ 27619 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 27620 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27621 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27622 for { 27623 _ = v.Args[1] 27624 or := v.Args[0] 27625 if or.Op != OpAMD64ORQ { 27626 break 27627 } 27628 _ = or.Args[1] 27629 y := or.Args[0] 27630 s0 := or.Args[1] 27631 if s0.Op != OpAMD64SHLQconst { 27632 break 27633 } 27634 j0 := 
s0.AuxInt 27635 x0 := s0.Args[0] 27636 if x0.Op != OpAMD64MOVBloadidx1 { 27637 break 27638 } 27639 i0 := x0.AuxInt 27640 s := x0.Aux 27641 _ = x0.Args[2] 27642 idx := x0.Args[0] 27643 p := x0.Args[1] 27644 mem := x0.Args[2] 27645 s1 := v.Args[1] 27646 if s1.Op != OpAMD64SHLQconst { 27647 break 27648 } 27649 j1 := s1.AuxInt 27650 x1 := s1.Args[0] 27651 if x1.Op != OpAMD64MOVBloadidx1 { 27652 break 27653 } 27654 i1 := x1.AuxInt 27655 if x1.Aux != s { 27656 break 27657 } 27658 _ = x1.Args[2] 27659 if p != x1.Args[0] { 27660 break 27661 } 27662 if idx != x1.Args[1] { 27663 break 27664 } 27665 if mem != x1.Args[2] { 27666 break 27667 } 27668 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27669 break 27670 } 27671 b = mergePoint(b, x0, x1) 27672 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27673 v.reset(OpCopy) 27674 v.AddArg(v0) 27675 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27676 v1.AuxInt = j0 27677 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27678 v2.AuxInt = i0 27679 v2.Aux = s 27680 v2.AddArg(p) 27681 v2.AddArg(idx) 27682 v2.AddArg(mem) 27683 v1.AddArg(v2) 27684 v0.AddArg(v1) 27685 v0.AddArg(y) 27686 return true 27687 } 27688 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27689 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27690 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27691 for { 27692 _ = v.Args[1] 27693 or := v.Args[0] 27694 if or.Op != OpAMD64ORQ { 27695 break 27696 } 27697 _ = or.Args[1] 27698 s0 := or.Args[0] 27699 if s0.Op != OpAMD64SHLQconst { 27700 break 27701 } 27702 j0 := s0.AuxInt 27703 x0 := s0.Args[0] 27704 if x0.Op != OpAMD64MOVBloadidx1 { 27705 break 27706 } 27707 i0 := x0.AuxInt 27708 s := x0.Aux 27709 _ = x0.Args[2] 27710 p := x0.Args[0] 27711 idx := x0.Args[1] 27712 mem := x0.Args[2] 27713 y := or.Args[1] 27714 s1 := v.Args[1] 27715 if s1.Op != OpAMD64SHLQconst { 27716 break 27717 } 27718 j1 := s1.AuxInt 27719 x1 := s1.Args[0] 27720 if x1.Op != OpAMD64MOVBloadidx1 { 27721 break 27722 } 27723 i1 := x1.AuxInt 27724 if x1.Aux != s { 27725 break 27726 } 27727 _ = x1.Args[2] 27728 if idx != x1.Args[0] { 27729 break 27730 } 27731 if p != x1.Args[1] { 27732 break 27733 } 27734 if mem != x1.Args[2] { 27735 break 27736 } 27737 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27738 break 27739 } 27740 b = mergePoint(b, x0, x1) 27741 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27742 v.reset(OpCopy) 27743 v.AddArg(v0) 27744 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27745 v1.AuxInt = j0 27746 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27747 v2.AuxInt = i0 27748 v2.Aux = s 27749 v2.AddArg(p) 27750 v2.AddArg(idx) 27751 v2.AddArg(mem) 27752 v1.AddArg(v2) 27753 v0.AddArg(v1) 27754 v0.AddArg(y) 27755 return true 27756 } 27757 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] 
x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27758 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27759 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27760 for { 27761 _ = v.Args[1] 27762 or := v.Args[0] 27763 if or.Op != OpAMD64ORQ { 27764 break 27765 } 27766 _ = or.Args[1] 27767 s0 := or.Args[0] 27768 if s0.Op != OpAMD64SHLQconst { 27769 break 27770 } 27771 j0 := s0.AuxInt 27772 x0 := s0.Args[0] 27773 if x0.Op != OpAMD64MOVBloadidx1 { 27774 break 27775 } 27776 i0 := x0.AuxInt 27777 s := x0.Aux 27778 _ = x0.Args[2] 27779 idx := x0.Args[0] 27780 p := x0.Args[1] 27781 mem := x0.Args[2] 27782 y := or.Args[1] 27783 s1 := v.Args[1] 27784 if s1.Op != OpAMD64SHLQconst { 27785 break 27786 } 27787 j1 := s1.AuxInt 27788 x1 := s1.Args[0] 27789 if x1.Op != OpAMD64MOVBloadidx1 { 27790 break 27791 } 27792 i1 := x1.AuxInt 27793 if x1.Aux != s { 27794 break 27795 } 27796 _ = x1.Args[2] 27797 if idx != x1.Args[0] { 27798 break 27799 } 27800 if p != x1.Args[1] { 27801 break 27802 } 27803 if mem != x1.Args[2] { 27804 break 27805 } 27806 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27807 break 27808 } 27809 b = mergePoint(b, x0, x1) 27810 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27811 v.reset(OpCopy) 27812 v.AddArg(v0) 27813 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27814 v1.AuxInt = j0 27815 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27816 v2.AuxInt = i0 27817 v2.Aux = s 27818 v2.AddArg(p) 27819 v2.AddArg(idx) 27820 v2.AddArg(mem) 27821 v1.AddArg(v2) 27822 v0.AddArg(v1) 27823 v0.AddArg(y) 27824 return true 27825 } 27826 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27827 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27828 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27829 for { 27830 _ = v.Args[1] 27831 or := v.Args[0] 27832 if or.Op != OpAMD64ORQ { 27833 break 27834 } 27835 _ = or.Args[1] 27836 y := or.Args[0] 27837 s0 := or.Args[1] 27838 if s0.Op != OpAMD64SHLQconst { 27839 break 27840 } 27841 j0 := s0.AuxInt 27842 x0 := s0.Args[0] 27843 if x0.Op != OpAMD64MOVBloadidx1 { 27844 break 27845 } 27846 i0 := x0.AuxInt 27847 s := x0.Aux 27848 _ = x0.Args[2] 27849 p := x0.Args[0] 27850 idx := x0.Args[1] 27851 mem := x0.Args[2] 27852 s1 := v.Args[1] 27853 if s1.Op != OpAMD64SHLQconst { 27854 break 27855 } 27856 j1 := s1.AuxInt 27857 x1 := s1.Args[0] 27858 if x1.Op != OpAMD64MOVBloadidx1 { 27859 break 27860 } 27861 i1 := x1.AuxInt 27862 if x1.Aux != s { 27863 break 27864 } 27865 _ = x1.Args[2] 27866 if idx != x1.Args[0] { 27867 break 27868 } 27869 if p != x1.Args[1] { 27870 break 27871 } 27872 if mem != x1.Args[2] { 27873 break 27874 } 27875 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27876 break 27877 } 27878 b = mergePoint(b, x0, x1) 27879 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27880 v.reset(OpCopy) 27881 v.AddArg(v0) 27882 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27883 v1.AuxInt = j0 27884 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27885 v2.AuxInt = i0 27886 v2.Aux = s 27887 v2.AddArg(p) 27888 v2.AddArg(idx) 27889 v2.AddArg(mem) 27890 v1.AddArg(v2) 27891 v0.AddArg(v1) 27892 v0.AddArg(y) 27893 return true 27894 } 27895 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 27896 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27897 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 27898 for { 27899 _ = v.Args[1] 27900 or := v.Args[0] 27901 if or.Op != OpAMD64ORQ { 27902 break 27903 } 27904 _ = or.Args[1] 27905 y := or.Args[0] 27906 s0 := or.Args[1] 27907 if s0.Op != OpAMD64SHLQconst { 27908 break 27909 } 27910 j0 := s0.AuxInt 27911 x0 := s0.Args[0] 27912 if x0.Op != OpAMD64MOVBloadidx1 { 27913 break 27914 } 27915 i0 := x0.AuxInt 27916 s := x0.Aux 27917 _ = x0.Args[2] 27918 idx := x0.Args[0] 27919 p := x0.Args[1] 27920 mem := x0.Args[2] 27921 s1 := v.Args[1] 27922 if s1.Op != OpAMD64SHLQconst { 27923 break 27924 } 27925 j1 := s1.AuxInt 27926 x1 := s1.Args[0] 27927 if x1.Op != OpAMD64MOVBloadidx1 { 27928 break 27929 } 27930 i1 := x1.AuxInt 27931 if x1.Aux != s { 27932 break 27933 } 27934 _ = x1.Args[2] 27935 if idx != x1.Args[0] { 27936 break 27937 } 27938 if p != x1.Args[1] { 27939 break 27940 } 27941 if mem != x1.Args[2] { 27942 break 27943 } 27944 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 27945 break 27946 } 27947 b = mergePoint(b, x0, x1) 27948 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 27949 v.reset(OpCopy) 27950 v.AddArg(v0) 27951 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 27952 v1.AuxInt = j0 27953 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 27954 v2.AuxInt = i0 27955 v2.Aux = s 27956 v2.AddArg(p) 27957 v2.AddArg(idx) 27958 v2.AddArg(mem) 27959 v1.AddArg(v2) 27960 v0.AddArg(v1) 27961 v0.AddArg(y) 27962 return true 27963 } 27964 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 27965 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 27966 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 27967 for { 27968 _ = v.Args[1] 27969 s1 := v.Args[0] 27970 if s1.Op != OpAMD64SHLQconst { 27971 break 27972 } 27973 j1 := s1.AuxInt 27974 x1 := s1.Args[0] 27975 if x1.Op != OpAMD64MOVWloadidx1 { 27976 break 27977 } 27978 i1 := x1.AuxInt 27979 s := x1.Aux 27980 _ = x1.Args[2] 27981 p := x1.Args[0] 27982 idx := x1.Args[1] 27983 mem := x1.Args[2] 27984 or := v.Args[1] 27985 if or.Op != OpAMD64ORQ { 27986 break 27987 } 27988 _ 
= or.Args[1] 27989 s0 := or.Args[0] 27990 if s0.Op != OpAMD64SHLQconst { 27991 break 27992 } 27993 j0 := s0.AuxInt 27994 x0 := s0.Args[0] 27995 if x0.Op != OpAMD64MOVWloadidx1 { 27996 break 27997 } 27998 i0 := x0.AuxInt 27999 if x0.Aux != s { 28000 break 28001 } 28002 _ = x0.Args[2] 28003 if p != x0.Args[0] { 28004 break 28005 } 28006 if idx != x0.Args[1] { 28007 break 28008 } 28009 if mem != x0.Args[2] { 28010 break 28011 } 28012 y := or.Args[1] 28013 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28014 break 28015 } 28016 b = mergePoint(b, x0, x1) 28017 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28018 v.reset(OpCopy) 28019 v.AddArg(v0) 28020 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28021 v1.AuxInt = j0 28022 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28023 v2.AuxInt = i0 28024 v2.Aux = s 28025 v2.AddArg(p) 28026 v2.AddArg(idx) 28027 v2.AddArg(mem) 28028 v1.AddArg(v2) 28029 v0.AddArg(v1) 28030 v0.AddArg(y) 28031 return true 28032 } 28033 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) 28034 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28035 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28036 for { 28037 _ = v.Args[1] 28038 s1 := v.Args[0] 28039 if s1.Op != OpAMD64SHLQconst { 28040 break 28041 } 28042 j1 := s1.AuxInt 28043 x1 := s1.Args[0] 28044 if x1.Op != OpAMD64MOVWloadidx1 { 28045 break 28046 } 28047 i1 := x1.AuxInt 28048 s := x1.Aux 28049 _ = x1.Args[2] 28050 idx := x1.Args[0] 28051 p := x1.Args[1] 28052 mem := x1.Args[2] 28053 or := v.Args[1] 28054 if or.Op != OpAMD64ORQ { 28055 break 28056 } 28057 _ = or.Args[1] 28058 s0 := or.Args[0] 28059 if s0.Op != OpAMD64SHLQconst { 28060 break 28061 } 28062 j0 := s0.AuxInt 28063 x0 := s0.Args[0] 28064 if x0.Op != OpAMD64MOVWloadidx1 { 28065 break 28066 } 28067 i0 := x0.AuxInt 28068 if x0.Aux != s { 28069 break 28070 } 28071 _ = x0.Args[2] 28072 if p != x0.Args[0] { 28073 break 28074 } 28075 if idx != x0.Args[1] { 28076 break 28077 } 28078 if mem != x0.Args[2] { 28079 break 28080 } 28081 y := or.Args[1] 28082 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28083 break 28084 } 28085 b = mergePoint(b, x0, x1) 28086 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28087 v.reset(OpCopy) 28088 v.AddArg(v0) 28089 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28090 v1.AuxInt = j0 28091 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28092 v2.AuxInt = i0 28093 v2.Aux = s 28094 v2.AddArg(p) 28095 v2.AddArg(idx) 28096 v2.AddArg(mem) 28097 v1.AddArg(v2) 28098 v0.AddArg(v1) 28099 v0.AddArg(y) 28100 return true 28101 } 28102 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 28103 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28104 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28105 for { 28106 _ = v.Args[1] 28107 s1 := v.Args[0] 28108 if s1.Op != OpAMD64SHLQconst { 28109 break 28110 } 28111 j1 := s1.AuxInt 28112 x1 := s1.Args[0] 28113 if x1.Op != OpAMD64MOVWloadidx1 { 28114 break 28115 } 28116 i1 := x1.AuxInt 28117 s := x1.Aux 28118 _ = x1.Args[2] 28119 p := x1.Args[0] 28120 idx := x1.Args[1] 28121 mem := x1.Args[2] 28122 or := v.Args[1] 28123 if or.Op != OpAMD64ORQ { 28124 break 28125 } 28126 _ = or.Args[1] 28127 s0 := or.Args[0] 28128 if s0.Op != OpAMD64SHLQconst { 28129 break 28130 } 28131 j0 := s0.AuxInt 28132 x0 := s0.Args[0] 28133 if x0.Op != OpAMD64MOVWloadidx1 { 28134 break 28135 } 28136 i0 := x0.AuxInt 28137 if x0.Aux != s { 28138 break 28139 } 28140 _ = x0.Args[2] 28141 if idx != x0.Args[0] { 28142 break 28143 } 28144 if p != x0.Args[1] { 28145 break 28146 } 28147 if mem != x0.Args[2] { 28148 break 28149 } 28150 y := or.Args[1] 28151 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28152 break 28153 } 28154 b = mergePoint(b, x0, x1) 28155 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28156 v.reset(OpCopy) 28157 v.AddArg(v0) 28158 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28159 v1.AuxInt = j0 28160 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28161 v2.AuxInt = i0 28162 v2.Aux = s 28163 v2.AddArg(p) 28164 v2.AddArg(idx) 28165 v2.AddArg(mem) 28166 v1.AddArg(v2) 28167 v0.AddArg(v1) 28168 v0.AddArg(y) 28169 return true 28170 } 28171 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) 28172 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28173 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28174 for { 28175 _ = v.Args[1] 28176 s1 := v.Args[0] 28177 if s1.Op != OpAMD64SHLQconst { 28178 break 28179 } 28180 j1 := s1.AuxInt 28181 x1 := s1.Args[0] 28182 if x1.Op != OpAMD64MOVWloadidx1 { 28183 break 28184 } 28185 i1 := x1.AuxInt 28186 s := x1.Aux 28187 _ = x1.Args[2] 28188 idx := x1.Args[0] 28189 p := x1.Args[1] 28190 mem := x1.Args[2] 28191 or := v.Args[1] 28192 if or.Op != OpAMD64ORQ { 28193 break 28194 } 28195 _ = or.Args[1] 28196 s0 := or.Args[0] 28197 if s0.Op != OpAMD64SHLQconst { 28198 break 28199 } 28200 j0 := s0.AuxInt 28201 x0 := s0.Args[0] 28202 if x0.Op != OpAMD64MOVWloadidx1 { 28203 break 28204 } 28205 i0 := x0.AuxInt 28206 if x0.Aux != s { 28207 break 28208 } 28209 _ = x0.Args[2] 28210 if idx != x0.Args[0] { 28211 break 28212 } 28213 if p != x0.Args[1] { 28214 break 28215 } 28216 if mem != x0.Args[2] { 28217 break 28218 } 28219 y := or.Args[1] 28220 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28221 break 28222 } 28223 b = mergePoint(b, x0, x1) 28224 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28225 
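// The combined value is materialized in the mergePoint block (the
// "@mergePoint" of the result comment above); the ORQ matched here is
// then reduced to a plain copy of that new value.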
v.reset(OpCopy) 28226 v.AddArg(v0) 28227 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28228 v1.AuxInt = j0 28229 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28230 v2.AuxInt = i0 28231 v2.Aux = s 28232 v2.AddArg(p) 28233 v2.AddArg(idx) 28234 v2.AddArg(mem) 28235 v1.AddArg(v2) 28236 v0.AddArg(v1) 28237 v0.AddArg(y) 28238 return true 28239 } 28240 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 28241 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28242 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28243 for { 28244 _ = v.Args[1] 28245 s1 := v.Args[0] 28246 if s1.Op != OpAMD64SHLQconst { 28247 break 28248 } 28249 j1 := s1.AuxInt 28250 x1 := s1.Args[0] 28251 if x1.Op != OpAMD64MOVWloadidx1 { 28252 break 28253 } 28254 i1 := x1.AuxInt 28255 s := x1.Aux 28256 _ = x1.Args[2] 28257 p := x1.Args[0] 28258 idx := x1.Args[1] 28259 mem := x1.Args[2] 28260 or := v.Args[1] 28261 if or.Op != OpAMD64ORQ { 28262 break 28263 } 28264 _ = or.Args[1] 28265 y := or.Args[0] 28266 s0 := or.Args[1] 28267 if s0.Op != OpAMD64SHLQconst { 28268 break 28269 } 28270 j0 := s0.AuxInt 28271 x0 := s0.Args[0] 28272 if x0.Op != OpAMD64MOVWloadidx1 { 28273 break 28274 } 28275 i0 := x0.AuxInt 28276 if x0.Aux != s { 28277 break 28278 } 28279 _ = x0.Args[2] 28280 if p != x0.Args[0] { 28281 break 28282 } 28283 if idx != x0.Args[1] { 28284 break 28285 } 28286 if mem != x0.Args[2] { 28287 break 28288 } 28289 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28290 break 28291 } 28292 b = mergePoint(b, x0, x1) 28293 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28294 v.reset(OpCopy) 28295 v.AddArg(v0) 28296 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28297 v1.AuxInt = j0 28298 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28299 v2.AuxInt = i0 28300 v2.Aux = s 28301 v2.AddArg(p) 28302 v2.AddArg(idx) 28303 v2.AddArg(mem) 28304 v1.AddArg(v2) 28305 v0.AddArg(v1) 28306 v0.AddArg(y) 28307 return true 28308 } 28309 return false 28310 } 28311 func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool { 28312 b := v.Block 28313 _ = b 28314 typ := &b.Func.Config.Types 28315 _ = typ 28316 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 28317 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28318 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28319 for { 28320 _ = v.Args[1] 28321 s1 := v.Args[0] 28322 if s1.Op != OpAMD64SHLQconst { 28323 break 28324 } 28325 j1 := s1.AuxInt 28326 x1 := s1.Args[0] 28327 if x1.Op != OpAMD64MOVWloadidx1 { 28328 break 28329 } 28330 i1 := x1.AuxInt 28331 s := x1.Aux 28332 _ = x1.Args[2] 28333 idx := x1.Args[0] 28334 p := x1.Args[1] 28335 mem := x1.Args[2] 28336 or := v.Args[1] 28337 if or.Op != OpAMD64ORQ { 28338 break 28339 } 28340 _ = 
or.Args[1] 28341 y := or.Args[0] 28342 s0 := or.Args[1] 28343 if s0.Op != OpAMD64SHLQconst { 28344 break 28345 } 28346 j0 := s0.AuxInt 28347 x0 := s0.Args[0] 28348 if x0.Op != OpAMD64MOVWloadidx1 { 28349 break 28350 } 28351 i0 := x0.AuxInt 28352 if x0.Aux != s { 28353 break 28354 } 28355 _ = x0.Args[2] 28356 if p != x0.Args[0] { 28357 break 28358 } 28359 if idx != x0.Args[1] { 28360 break 28361 } 28362 if mem != x0.Args[2] { 28363 break 28364 } 28365 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28366 break 28367 } 28368 b = mergePoint(b, x0, x1) 28369 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28370 v.reset(OpCopy) 28371 v.AddArg(v0) 28372 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28373 v1.AuxInt = j0 28374 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28375 v2.AuxInt = i0 28376 v2.Aux = s 28377 v2.AddArg(p) 28378 v2.AddArg(idx) 28379 v2.AddArg(mem) 28380 v1.AddArg(v2) 28381 v0.AddArg(v1) 28382 v0.AddArg(y) 28383 return true 28384 } 28385 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28386 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28387 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28388 for { 28389 _ = v.Args[1] 28390 s1 := v.Args[0] 28391 if s1.Op != OpAMD64SHLQconst { 28392 break 28393 } 28394 j1 := s1.AuxInt 28395 x1 := s1.Args[0] 28396 if x1.Op != OpAMD64MOVWloadidx1 { 28397 break 28398 } 28399 i1 := x1.AuxInt 28400 s := x1.Aux 28401 _ = x1.Args[2] 28402 p := x1.Args[0] 28403 idx := x1.Args[1] 28404 mem := x1.Args[2] 28405 or := v.Args[1] 28406 if or.Op != OpAMD64ORQ { 28407 break 28408 } 28409 _ = or.Args[1] 28410 y := or.Args[0] 28411 s0 := or.Args[1] 28412 if s0.Op != OpAMD64SHLQconst { 28413 break 28414 } 28415 j0 := s0.AuxInt 28416 x0 := s0.Args[0] 28417 if x0.Op != OpAMD64MOVWloadidx1 { 28418 break 28419 } 28420 i0 := x0.AuxInt 28421 if x0.Aux != s { 28422 break 28423 } 28424 _ = x0.Args[2] 28425 if idx != x0.Args[0] { 28426 break 28427 } 28428 if p != x0.Args[1] { 28429 break 28430 } 28431 if mem != x0.Args[2] { 28432 break 28433 } 28434 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28435 break 28436 } 28437 b = mergePoint(b, x0, x1) 28438 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28439 v.reset(OpCopy) 28440 v.AddArg(v0) 28441 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28442 v1.AuxInt = j0 28443 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28444 v2.AuxInt = i0 28445 v2.Aux = s 28446 v2.AddArg(p) 28447 v2.AddArg(idx) 28448 v2.AddArg(mem) 28449 v1.AddArg(v2) 28450 v0.AddArg(v1) 28451 v0.AddArg(y) 28452 return true 28453 } 28454 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 28455 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28456 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28457 for { 28458 _ = v.Args[1] 28459 s1 := v.Args[0] 28460 if s1.Op != OpAMD64SHLQconst { 28461 break 28462 } 28463 j1 := s1.AuxInt 28464 x1 := s1.Args[0] 28465 if x1.Op != OpAMD64MOVWloadidx1 { 28466 break 28467 } 28468 i1 := x1.AuxInt 28469 s := x1.Aux 28470 _ = x1.Args[2] 28471 idx := x1.Args[0] 28472 p := x1.Args[1] 28473 mem := x1.Args[2] 28474 or := v.Args[1] 28475 if or.Op != OpAMD64ORQ { 28476 break 28477 } 28478 _ = or.Args[1] 28479 y := or.Args[0] 28480 s0 := or.Args[1] 28481 if s0.Op != OpAMD64SHLQconst { 28482 break 28483 } 28484 j0 := s0.AuxInt 28485 x0 := s0.Args[0] 28486 if x0.Op != OpAMD64MOVWloadidx1 { 28487 break 28488 } 28489 i0 := x0.AuxInt 28490 if x0.Aux != s { 28491 break 28492 } 28493 _ = x0.Args[2] 28494 if idx != x0.Args[0] { 28495 break 28496 } 28497 if p != x0.Args[1] { 28498 break 28499 } 28500 if mem != x0.Args[2] { 28501 break 28502 } 28503 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28504 break 28505 } 28506 b = mergePoint(b, x0, x1) 28507 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28508 v.reset(OpCopy) 28509 v.AddArg(v0) 28510 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28511 v1.AuxInt = j0 28512 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28513 v2.AuxInt = i0 28514 v2.Aux = s 28515 v2.AddArg(p) 28516 v2.AddArg(idx) 28517 v2.AddArg(mem) 28518 v1.AddArg(v2) 28519 v0.AddArg(v1) 28520 v0.AddArg(y) 28521 return true 28522 } 28523 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28524 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28525 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28526 for { 28527 _ = v.Args[1] 28528 or := v.Args[0] 28529 if or.Op != OpAMD64ORQ { 28530 break 28531 } 28532 _ = or.Args[1] 28533 s0 := or.Args[0] 28534 if s0.Op != OpAMD64SHLQconst { 28535 break 28536 } 28537 j0 := s0.AuxInt 28538 x0 := s0.Args[0] 28539 if x0.Op != OpAMD64MOVWloadidx1 { 28540 break 28541 } 28542 i0 := x0.AuxInt 28543 s := x0.Aux 28544 _ = x0.Args[2] 28545 p := x0.Args[0] 28546 idx := x0.Args[1] 28547 mem := x0.Args[2] 28548 y := or.Args[1] 28549 s1 := v.Args[1] 28550 if s1.Op != OpAMD64SHLQconst { 28551 break 28552 } 28553 j1 := s1.AuxInt 28554 x1 := s1.Args[0] 28555 if x1.Op != OpAMD64MOVWloadidx1 { 28556 break 28557 } 28558 i1 := x1.AuxInt 28559 if x1.Aux != s { 28560 break 28561 } 28562 _ = x1.Args[2] 28563 if p != x1.Args[0] { 28564 break 28565 } 28566 if idx != x1.Args[1] { 28567 break 28568 } 28569 if mem != x1.Args[2] { 28570 break 28571 } 28572 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28573 break 28574 } 28575 b = mergePoint(b, x0, x1) 28576 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28577 
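// Rebuild at the merge point: the pair of matched 16-bit indexed loads is
// replaced by one 32-bit indexed load at the lower offset i0, shifted into
// position by j0 and ORed back with y; v itself becomes a copy of the new
// tree. The soundness of the merge, as a rough little-endian sketch using
// hypothetical helpers (load16/load32 are not functions in this file):
//
//	lo := uint64(load16(p + idx + i0))     // x0
//	hi := uint64(load16(p + idx + i0 + 2)) // x1, since i1 == i0+2
//	lo<<j0 | hi<<(j0+16)                   // == uint64(load32(p+idx+i0)) << j0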
v.reset(OpCopy) 28578 v.AddArg(v0) 28579 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28580 v1.AuxInt = j0 28581 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28582 v2.AuxInt = i0 28583 v2.Aux = s 28584 v2.AddArg(p) 28585 v2.AddArg(idx) 28586 v2.AddArg(mem) 28587 v1.AddArg(v2) 28588 v0.AddArg(v1) 28589 v0.AddArg(y) 28590 return true 28591 } 28592 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28593 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28594 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28595 for { 28596 _ = v.Args[1] 28597 or := v.Args[0] 28598 if or.Op != OpAMD64ORQ { 28599 break 28600 } 28601 _ = or.Args[1] 28602 s0 := or.Args[0] 28603 if s0.Op != OpAMD64SHLQconst { 28604 break 28605 } 28606 j0 := s0.AuxInt 28607 x0 := s0.Args[0] 28608 if x0.Op != OpAMD64MOVWloadidx1 { 28609 break 28610 } 28611 i0 := x0.AuxInt 28612 s := x0.Aux 28613 _ = x0.Args[2] 28614 idx := x0.Args[0] 28615 p := x0.Args[1] 28616 mem := x0.Args[2] 28617 y := or.Args[1] 28618 s1 := v.Args[1] 28619 if s1.Op != OpAMD64SHLQconst { 28620 break 28621 } 28622 j1 := s1.AuxInt 28623 x1 := s1.Args[0] 28624 if x1.Op != OpAMD64MOVWloadidx1 { 28625 break 28626 } 28627 i1 := x1.AuxInt 28628 if x1.Aux != s { 28629 break 28630 } 28631 _ = x1.Args[2] 28632 if p != x1.Args[0] { 28633 break 28634 } 28635 if idx != x1.Args[1] { 28636 break 28637 } 28638 if mem != x1.Args[2] { 28639 break 28640 } 28641 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28642 break 28643 } 28644 b = mergePoint(b, x0, x1) 28645 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28646 v.reset(OpCopy) 28647 v.AddArg(v0) 28648 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28649 v1.AuxInt = j0 28650 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28651 v2.AuxInt = i0 28652 v2.Aux = s 28653 v2.AddArg(p) 28654 v2.AddArg(idx) 28655 v2.AddArg(mem) 28656 v1.AddArg(v2) 28657 v0.AddArg(v1) 28658 v0.AddArg(y) 28659 return true 28660 } 28661 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28662 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28663 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28664 for { 28665 _ = v.Args[1] 28666 or := v.Args[0] 28667 if or.Op != OpAMD64ORQ { 28668 break 28669 } 28670 _ = or.Args[1] 28671 y := or.Args[0] 28672 s0 := or.Args[1] 28673 if s0.Op != OpAMD64SHLQconst { 28674 break 28675 } 28676 j0 := s0.AuxInt 28677 x0 := s0.Args[0] 28678 if x0.Op != OpAMD64MOVWloadidx1 { 28679 break 28680 } 28681 i0 := x0.AuxInt 28682 s := x0.Aux 28683 _ = x0.Args[2] 28684 p := x0.Args[0] 28685 idx := x0.Args[1] 28686 mem := x0.Args[2] 28687 s1 := v.Args[1] 28688 if s1.Op != OpAMD64SHLQconst { 28689 break 28690 } 28691 j1 := s1.AuxInt 28692 x1 := s1.Args[0] 28693 
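// The load under the second SHLQconst must also be a 16-bit indexed load;
// the checks that follow pin it to the same {s} symbol, p, idx, and mem as
// x0 before the merge condition is evaluated.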
if x1.Op != OpAMD64MOVWloadidx1 { 28694 break 28695 } 28696 i1 := x1.AuxInt 28697 if x1.Aux != s { 28698 break 28699 } 28700 _ = x1.Args[2] 28701 if p != x1.Args[0] { 28702 break 28703 } 28704 if idx != x1.Args[1] { 28705 break 28706 } 28707 if mem != x1.Args[2] { 28708 break 28709 } 28710 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28711 break 28712 } 28713 b = mergePoint(b, x0, x1) 28714 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28715 v.reset(OpCopy) 28716 v.AddArg(v0) 28717 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28718 v1.AuxInt = j0 28719 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28720 v2.AuxInt = i0 28721 v2.Aux = s 28722 v2.AddArg(p) 28723 v2.AddArg(idx) 28724 v2.AddArg(mem) 28725 v1.AddArg(v2) 28726 v0.AddArg(v1) 28727 v0.AddArg(y) 28728 return true 28729 } 28730 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28731 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28732 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28733 for { 28734 _ = v.Args[1] 28735 or := v.Args[0] 28736 if or.Op != OpAMD64ORQ { 28737 break 28738 } 28739 _ = or.Args[1] 28740 y := or.Args[0] 28741 s0 := or.Args[1] 28742 if s0.Op != OpAMD64SHLQconst { 28743 break 28744 } 28745 j0 := s0.AuxInt 28746 x0 := s0.Args[0] 28747 if x0.Op != OpAMD64MOVWloadidx1 { 28748 break 28749 } 28750 i0 := x0.AuxInt 28751 s := x0.Aux 28752 _ = x0.Args[2] 28753 idx := x0.Args[0] 28754 p := x0.Args[1] 28755 mem := x0.Args[2] 28756 s1 := v.Args[1] 28757 if s1.Op != OpAMD64SHLQconst { 28758 break 28759 } 28760 j1 := s1.AuxInt 28761 x1 := s1.Args[0] 28762 if x1.Op != OpAMD64MOVWloadidx1 { 28763 break 28764 } 28765 i1 := x1.AuxInt 28766 if x1.Aux != s { 28767 break 28768 } 28769 _ = x1.Args[2] 28770 if p != x1.Args[0] { 28771 break 28772 } 28773 if idx != x1.Args[1] { 28774 break 28775 } 28776 if mem != x1.Args[2] { 28777 break 28778 } 28779 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28780 break 28781 } 28782 b = mergePoint(b, x0, x1) 28783 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28784 v.reset(OpCopy) 28785 v.AddArg(v0) 28786 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28787 v1.AuxInt = j0 28788 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28789 v2.AuxInt = i0 28790 v2.Aux = s 28791 v2.AddArg(p) 28792 v2.AddArg(idx) 28793 v2.AddArg(mem) 28794 v1.AddArg(v2) 28795 v0.AddArg(v1) 28796 v0.AddArg(y) 28797 return true 28798 } 28799 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28800 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28801 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> 
[j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28802 for { 28803 _ = v.Args[1] 28804 or := v.Args[0] 28805 if or.Op != OpAMD64ORQ { 28806 break 28807 } 28808 _ = or.Args[1] 28809 s0 := or.Args[0] 28810 if s0.Op != OpAMD64SHLQconst { 28811 break 28812 } 28813 j0 := s0.AuxInt 28814 x0 := s0.Args[0] 28815 if x0.Op != OpAMD64MOVWloadidx1 { 28816 break 28817 } 28818 i0 := x0.AuxInt 28819 s := x0.Aux 28820 _ = x0.Args[2] 28821 p := x0.Args[0] 28822 idx := x0.Args[1] 28823 mem := x0.Args[2] 28824 y := or.Args[1] 28825 s1 := v.Args[1] 28826 if s1.Op != OpAMD64SHLQconst { 28827 break 28828 } 28829 j1 := s1.AuxInt 28830 x1 := s1.Args[0] 28831 if x1.Op != OpAMD64MOVWloadidx1 { 28832 break 28833 } 28834 i1 := x1.AuxInt 28835 if x1.Aux != s { 28836 break 28837 } 28838 _ = x1.Args[2] 28839 if idx != x1.Args[0] { 28840 break 28841 } 28842 if p != x1.Args[1] { 28843 break 28844 } 28845 if mem != x1.Args[2] { 28846 break 28847 } 28848 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28849 break 28850 } 28851 b = mergePoint(b, x0, x1) 28852 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28853 v.reset(OpCopy) 28854 v.AddArg(v0) 28855 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28856 v1.AuxInt = j0 28857 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28858 v2.AuxInt = i0 28859 v2.Aux = s 28860 v2.AddArg(p) 28861 v2.AddArg(idx) 28862 v2.AddArg(mem) 28863 v1.AddArg(v2) 28864 v0.AddArg(v1) 28865 v0.AddArg(y) 28866 return true 28867 } 28868 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28869 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28870 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28871 for { 28872 _ = v.Args[1] 28873 or := v.Args[0] 28874 if or.Op != OpAMD64ORQ { 28875 break 28876 } 28877 _ = or.Args[1] 28878 s0 := or.Args[0] 28879 if s0.Op != OpAMD64SHLQconst { 28880 break 28881 } 28882 j0 := s0.AuxInt 28883 x0 := s0.Args[0] 28884 if x0.Op != OpAMD64MOVWloadidx1 { 28885 break 28886 } 28887 i0 := x0.AuxInt 28888 s := x0.Aux 28889 _ = x0.Args[2] 28890 idx := x0.Args[0] 28891 p := x0.Args[1] 28892 mem := x0.Args[2] 28893 y := or.Args[1] 28894 s1 := v.Args[1] 28895 if s1.Op != OpAMD64SHLQconst { 28896 break 28897 } 28898 j1 := s1.AuxInt 28899 x1 := s1.Args[0] 28900 if x1.Op != OpAMD64MOVWloadidx1 { 28901 break 28902 } 28903 i1 := x1.AuxInt 28904 if x1.Aux != s { 28905 break 28906 } 28907 _ = x1.Args[2] 28908 if idx != x1.Args[0] { 28909 break 28910 } 28911 if p != x1.Args[1] { 28912 break 28913 } 28914 if mem != x1.Args[2] { 28915 break 28916 } 28917 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28918 break 28919 } 28920 b = mergePoint(b, x0, x1) 28921 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28922 v.reset(OpCopy) 28923 v.AddArg(v0) 28924 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28925 v1.AuxInt = j0 28926 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28927 
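// Even when a matched load used the swapped (idx p) argument order, the
// merged 32-bit load is emitted with the canonical (p idx) order and the
// lower offset i0.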
v2.AuxInt = i0 28928 v2.Aux = s 28929 v2.AddArg(p) 28930 v2.AddArg(idx) 28931 v2.AddArg(mem) 28932 v1.AddArg(v2) 28933 v0.AddArg(v1) 28934 v0.AddArg(y) 28935 return true 28936 } 28937 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28938 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28939 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28940 for { 28941 _ = v.Args[1] 28942 or := v.Args[0] 28943 if or.Op != OpAMD64ORQ { 28944 break 28945 } 28946 _ = or.Args[1] 28947 y := or.Args[0] 28948 s0 := or.Args[1] 28949 if s0.Op != OpAMD64SHLQconst { 28950 break 28951 } 28952 j0 := s0.AuxInt 28953 x0 := s0.Args[0] 28954 if x0.Op != OpAMD64MOVWloadidx1 { 28955 break 28956 } 28957 i0 := x0.AuxInt 28958 s := x0.Aux 28959 _ = x0.Args[2] 28960 p := x0.Args[0] 28961 idx := x0.Args[1] 28962 mem := x0.Args[2] 28963 s1 := v.Args[1] 28964 if s1.Op != OpAMD64SHLQconst { 28965 break 28966 } 28967 j1 := s1.AuxInt 28968 x1 := s1.Args[0] 28969 if x1.Op != OpAMD64MOVWloadidx1 { 28970 break 28971 } 28972 i1 := x1.AuxInt 28973 if x1.Aux != s { 28974 break 28975 } 28976 _ = x1.Args[2] 28977 if idx != x1.Args[0] { 28978 break 28979 } 28980 if p != x1.Args[1] { 28981 break 28982 } 28983 if mem != x1.Args[2] { 28984 break 28985 } 28986 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28987 break 28988 } 28989 b = mergePoint(b, x0, x1) 28990 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28991 v.reset(OpCopy) 28992 v.AddArg(v0) 28993 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28994 v1.AuxInt = j0 28995 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28996 v2.AuxInt = i0 28997 v2.Aux = s 28998 v2.AddArg(p) 28999 v2.AddArg(idx) 29000 v2.AddArg(mem) 29001 v1.AddArg(v2) 29002 v0.AddArg(v1) 29003 v0.AddArg(y) 29004 return true 29005 } 29006 return false 29007 } 29008 func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool { 29009 b := v.Block 29010 _ = b 29011 typ := &b.Func.Config.Types 29012 _ = typ 29013 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 29014 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29015 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 29016 for { 29017 _ = v.Args[1] 29018 or := v.Args[0] 29019 if or.Op != OpAMD64ORQ { 29020 break 29021 } 29022 _ = or.Args[1] 29023 y := or.Args[0] 29024 s0 := or.Args[1] 29025 if s0.Op != OpAMD64SHLQconst { 29026 break 29027 } 29028 j0 := s0.AuxInt 29029 x0 := s0.Args[0] 29030 if x0.Op != OpAMD64MOVWloadidx1 { 29031 break 29032 } 29033 i0 := x0.AuxInt 29034 s := x0.Aux 29035 _ = x0.Args[2] 29036 idx := x0.Args[0] 29037 p := x0.Args[1] 29038 mem := x0.Args[2] 29039 s1 := v.Args[1] 29040 if s1.Op != OpAMD64SHLQconst { 29041 break 29042 } 29043 j1 := s1.AuxInt 29044 x1 := s1.Args[0] 29045 if x1.Op != 
OpAMD64MOVWloadidx1 { 29046 break 29047 } 29048 i1 := x1.AuxInt 29049 if x1.Aux != s { 29050 break 29051 } 29052 _ = x1.Args[2] 29053 if idx != x1.Args[0] { 29054 break 29055 } 29056 if p != x1.Args[1] { 29057 break 29058 } 29059 if mem != x1.Args[2] { 29060 break 29061 } 29062 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29063 break 29064 } 29065 b = mergePoint(b, x0, x1) 29066 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29067 v.reset(OpCopy) 29068 v.AddArg(v0) 29069 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29070 v1.AuxInt = j0 29071 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29072 v2.AuxInt = i0 29073 v2.Aux = s 29074 v2.AddArg(p) 29075 v2.AddArg(idx) 29076 v2.AddArg(mem) 29077 v1.AddArg(v2) 29078 v0.AddArg(v1) 29079 v0.AddArg(y) 29080 return true 29081 } 29082 // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 29083 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29084 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 29085 for { 29086 _ = v.Args[1] 29087 x1 := v.Args[0] 29088 if x1.Op != OpAMD64MOVBload { 29089 break 29090 } 29091 i1 := x1.AuxInt 29092 s := x1.Aux 29093 _ = x1.Args[1] 29094 p := x1.Args[0] 29095 mem := x1.Args[1] 29096 sh := v.Args[1] 29097 if sh.Op != OpAMD64SHLQconst { 29098 break 29099 } 29100 if sh.AuxInt != 8 { 29101 break 29102 } 29103 x0 := sh.Args[0] 29104 if x0.Op != OpAMD64MOVBload { 29105 break 29106 } 29107 i0 := x0.AuxInt 29108 if x0.Aux != s { 29109 break 29110 } 29111 _ = x0.Args[1] 29112 if p != x0.Args[0] { 29113 break 29114 } 29115 if mem != x0.Args[1] { 29116 break 29117 } 29118 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29119 break 29120 } 29121 b = mergePoint(b, x0, x1) 29122 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 29123 v.reset(OpCopy) 29124 v.AddArg(v0) 29125 v0.AuxInt = 8 29126 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29127 v1.AuxInt = i0 29128 v1.Aux = s 29129 v1.AddArg(p) 29130 v1.AddArg(mem) 29131 v0.AddArg(v1) 29132 return true 29133 } 29134 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 29135 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29136 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 29137 for { 29138 _ = v.Args[1] 29139 sh := v.Args[0] 29140 if sh.Op != OpAMD64SHLQconst { 29141 break 29142 } 29143 if sh.AuxInt != 8 { 29144 break 29145 } 29146 x0 := sh.Args[0] 29147 if x0.Op != OpAMD64MOVBload { 29148 break 29149 } 29150 i0 := x0.AuxInt 29151 s := x0.Aux 29152 _ = x0.Args[1] 29153 p := x0.Args[0] 29154 mem := x0.Args[1] 29155 x1 := v.Args[1] 29156 if x1.Op != OpAMD64MOVBload { 29157 break 29158 } 29159 i1 := x1.AuxInt 29160 if x1.Aux != s { 29161 break 29162 } 29163 _ = x1.Args[1] 29164 if p != x1.Args[0] { 29165 break 29166 } 29167 if mem != x1.Args[1] { 29168 break 29169 } 29170 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29171 break 29172 } 29173 b = 
mergePoint(b, x0, x1) 29174 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 29175 v.reset(OpCopy) 29176 v.AddArg(v0) 29177 v0.AuxInt = 8 29178 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29179 v1.AuxInt = i0 29180 v1.Aux = s 29181 v1.AddArg(p) 29182 v1.AddArg(mem) 29183 v0.AddArg(v1) 29184 return true 29185 } 29186 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 29187 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29188 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 29189 for { 29190 _ = v.Args[1] 29191 r1 := v.Args[0] 29192 if r1.Op != OpAMD64ROLWconst { 29193 break 29194 } 29195 if r1.AuxInt != 8 { 29196 break 29197 } 29198 x1 := r1.Args[0] 29199 if x1.Op != OpAMD64MOVWload { 29200 break 29201 } 29202 i1 := x1.AuxInt 29203 s := x1.Aux 29204 _ = x1.Args[1] 29205 p := x1.Args[0] 29206 mem := x1.Args[1] 29207 sh := v.Args[1] 29208 if sh.Op != OpAMD64SHLQconst { 29209 break 29210 } 29211 if sh.AuxInt != 16 { 29212 break 29213 } 29214 r0 := sh.Args[0] 29215 if r0.Op != OpAMD64ROLWconst { 29216 break 29217 } 29218 if r0.AuxInt != 8 { 29219 break 29220 } 29221 x0 := r0.Args[0] 29222 if x0.Op != OpAMD64MOVWload { 29223 break 29224 } 29225 i0 := x0.AuxInt 29226 if x0.Aux != s { 29227 break 29228 } 29229 _ = x0.Args[1] 29230 if p != x0.Args[0] { 29231 break 29232 } 29233 if mem != x0.Args[1] { 29234 break 29235 } 29236 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29237 break 29238 } 29239 b = mergePoint(b, x0, x1) 29240 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29241 v.reset(OpCopy) 29242 v.AddArg(v0) 29243 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29244 v1.AuxInt = i0 29245 v1.Aux = s 29246 v1.AddArg(p) 29247 v1.AddArg(mem) 29248 v0.AddArg(v1) 29249 return true 29250 } 29251 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 29252 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29253 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 29254 for { 29255 _ = v.Args[1] 29256 sh := v.Args[0] 29257 if sh.Op != OpAMD64SHLQconst { 29258 break 29259 } 29260 if sh.AuxInt != 16 { 29261 break 29262 } 29263 r0 := sh.Args[0] 29264 if r0.Op != OpAMD64ROLWconst { 29265 break 29266 } 29267 if r0.AuxInt != 8 { 29268 break 29269 } 29270 x0 := r0.Args[0] 29271 if x0.Op != OpAMD64MOVWload { 29272 break 29273 } 29274 i0 := x0.AuxInt 29275 s := x0.Aux 29276 _ = x0.Args[1] 29277 p := x0.Args[0] 29278 mem := x0.Args[1] 29279 r1 := v.Args[1] 29280 if r1.Op != OpAMD64ROLWconst { 29281 break 29282 } 29283 if r1.AuxInt != 8 { 29284 break 29285 } 29286 x1 := r1.Args[0] 29287 if x1.Op != OpAMD64MOVWload { 29288 break 29289 } 29290 i1 := x1.AuxInt 29291 if x1.Aux != s { 29292 break 29293 } 29294 _ = x1.Args[1] 29295 if p != x1.Args[0] { 29296 break 29297 } 29298 if mem != x1.Args[1] { 29299 break 29300 } 29301 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && 
mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29302 break 29303 } 29304 b = mergePoint(b, x0, x1) 29305 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29306 v.reset(OpCopy) 29307 v.AddArg(v0) 29308 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29309 v1.AuxInt = i0 29310 v1.Aux = s 29311 v1.AddArg(p) 29312 v1.AddArg(mem) 29313 v0.AddArg(v1) 29314 return true 29315 } 29316 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) 29317 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29318 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 29319 for { 29320 _ = v.Args[1] 29321 r1 := v.Args[0] 29322 if r1.Op != OpAMD64BSWAPL { 29323 break 29324 } 29325 x1 := r1.Args[0] 29326 if x1.Op != OpAMD64MOVLload { 29327 break 29328 } 29329 i1 := x1.AuxInt 29330 s := x1.Aux 29331 _ = x1.Args[1] 29332 p := x1.Args[0] 29333 mem := x1.Args[1] 29334 sh := v.Args[1] 29335 if sh.Op != OpAMD64SHLQconst { 29336 break 29337 } 29338 if sh.AuxInt != 32 { 29339 break 29340 } 29341 r0 := sh.Args[0] 29342 if r0.Op != OpAMD64BSWAPL { 29343 break 29344 } 29345 x0 := r0.Args[0] 29346 if x0.Op != OpAMD64MOVLload { 29347 break 29348 } 29349 i0 := x0.AuxInt 29350 if x0.Aux != s { 29351 break 29352 } 29353 _ = x0.Args[1] 29354 if p != x0.Args[0] { 29355 break 29356 } 29357 if mem != x0.Args[1] { 29358 break 29359 } 29360 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29361 break 29362 } 29363 b = mergePoint(b, x0, x1) 29364 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29365 v.reset(OpCopy) 29366 v.AddArg(v0) 29367 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 29368 v1.AuxInt = i0 29369 v1.Aux = s 29370 v1.AddArg(p) 29371 v1.AddArg(mem) 29372 v0.AddArg(v1) 29373 return true 29374 } 29375 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) 29376 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29377 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 29378 for { 29379 _ = v.Args[1] 29380 sh := v.Args[0] 29381 if sh.Op != OpAMD64SHLQconst { 29382 break 29383 } 29384 if sh.AuxInt != 32 { 29385 break 29386 } 29387 r0 := sh.Args[0] 29388 if r0.Op != OpAMD64BSWAPL { 29389 break 29390 } 29391 x0 := r0.Args[0] 29392 if x0.Op != OpAMD64MOVLload { 29393 break 29394 } 29395 i0 := x0.AuxInt 29396 s := x0.Aux 29397 _ = x0.Args[1] 29398 p := x0.Args[0] 29399 mem := x0.Args[1] 29400 r1 := v.Args[1] 29401 if r1.Op != OpAMD64BSWAPL { 29402 break 29403 } 29404 x1 := r1.Args[0] 29405 if x1.Op != OpAMD64MOVLload { 29406 break 29407 } 29408 i1 := x1.AuxInt 29409 if x1.Aux != s { 29410 break 29411 } 29412 _ = x1.Args[1] 29413 if p != x1.Args[0] { 29414 break 29415 } 29416 if mem != x1.Args[1] { 29417 break 29418 } 29419 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29420 break 
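// Merge condition failed (adjacency i1 == i0+4, single-use, or merge-point
// requirement not met); fall through to the next ORQ pattern.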
29421 } 29422 b = mergePoint(b, x0, x1) 29423 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29424 v.reset(OpCopy) 29425 v.AddArg(v0) 29426 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 29427 v1.AuxInt = i0 29428 v1.Aux = s 29429 v1.AddArg(p) 29430 v1.AddArg(mem) 29431 v0.AddArg(v1) 29432 return true 29433 } 29434 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 29435 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29436 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29437 for { 29438 _ = v.Args[1] 29439 s0 := v.Args[0] 29440 if s0.Op != OpAMD64SHLQconst { 29441 break 29442 } 29443 j0 := s0.AuxInt 29444 x0 := s0.Args[0] 29445 if x0.Op != OpAMD64MOVBload { 29446 break 29447 } 29448 i0 := x0.AuxInt 29449 s := x0.Aux 29450 _ = x0.Args[1] 29451 p := x0.Args[0] 29452 mem := x0.Args[1] 29453 or := v.Args[1] 29454 if or.Op != OpAMD64ORQ { 29455 break 29456 } 29457 _ = or.Args[1] 29458 s1 := or.Args[0] 29459 if s1.Op != OpAMD64SHLQconst { 29460 break 29461 } 29462 j1 := s1.AuxInt 29463 x1 := s1.Args[0] 29464 if x1.Op != OpAMD64MOVBload { 29465 break 29466 } 29467 i1 := x1.AuxInt 29468 if x1.Aux != s { 29469 break 29470 } 29471 _ = x1.Args[1] 29472 if p != x1.Args[0] { 29473 break 29474 } 29475 if mem != x1.Args[1] { 29476 break 29477 } 29478 y := or.Args[1] 29479 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29480 break 29481 } 29482 b = mergePoint(b, x0, x1) 29483 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29484 v.reset(OpCopy) 29485 v.AddArg(v0) 29486 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29487 v1.AuxInt = j1 29488 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29489 v2.AuxInt = 8 29490 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29491 v3.AuxInt = i0 29492 v3.Aux = s 29493 v3.AddArg(p) 29494 v3.AddArg(mem) 29495 v2.AddArg(v3) 29496 v1.AddArg(v2) 29497 v0.AddArg(v1) 29498 v0.AddArg(y) 29499 return true 29500 } 29501 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) 29502 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29503 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29504 for { 29505 _ = v.Args[1] 29506 s0 := v.Args[0] 29507 if s0.Op != OpAMD64SHLQconst { 29508 break 29509 } 29510 j0 := s0.AuxInt 29511 x0 := s0.Args[0] 29512 if x0.Op != OpAMD64MOVBload { 29513 break 29514 } 29515 i0 := x0.AuxInt 29516 s := x0.Aux 29517 _ = x0.Args[1] 29518 p := x0.Args[0] 29519 mem := x0.Args[1] 29520 or := v.Args[1] 29521 if or.Op != OpAMD64ORQ { 29522 break 29523 } 29524 _ = or.Args[1] 29525 y := or.Args[0] 29526 s1 := or.Args[1] 29527 if s1.Op != OpAMD64SHLQconst { 29528 break 29529 } 29530 j1 := s1.AuxInt 29531 x1 := s1.Args[0] 29532 if x1.Op != OpAMD64MOVBload { 29533 break 29534 } 29535 i1 := 
x1.AuxInt 29536 if x1.Aux != s { 29537 break 29538 } 29539 _ = x1.Args[1] 29540 if p != x1.Args[0] { 29541 break 29542 } 29543 if mem != x1.Args[1] { 29544 break 29545 } 29546 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29547 break 29548 } 29549 b = mergePoint(b, x0, x1) 29550 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29551 v.reset(OpCopy) 29552 v.AddArg(v0) 29553 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29554 v1.AuxInt = j1 29555 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29556 v2.AuxInt = 8 29557 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29558 v3.AuxInt = i0 29559 v3.Aux = s 29560 v3.AddArg(p) 29561 v3.AddArg(mem) 29562 v2.AddArg(v3) 29563 v1.AddArg(v2) 29564 v0.AddArg(v1) 29565 v0.AddArg(y) 29566 return true 29567 } 29568 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 29569 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29570 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29571 for { 29572 _ = v.Args[1] 29573 or := v.Args[0] 29574 if or.Op != OpAMD64ORQ { 29575 break 29576 } 29577 _ = or.Args[1] 29578 s1 := or.Args[0] 29579 if s1.Op != OpAMD64SHLQconst { 29580 break 29581 } 29582 j1 := s1.AuxInt 29583 x1 := s1.Args[0] 29584 if x1.Op != OpAMD64MOVBload { 29585 break 29586 } 29587 i1 := x1.AuxInt 29588 s := x1.Aux 29589 _ = x1.Args[1] 29590 p := x1.Args[0] 29591 mem := x1.Args[1] 29592 y := or.Args[1] 29593 s0 := v.Args[1] 29594 if s0.Op != OpAMD64SHLQconst { 29595 break 29596 } 29597 j0 := s0.AuxInt 29598 x0 := s0.Args[0] 29599 if x0.Op != OpAMD64MOVBload { 29600 break 29601 } 29602 i0 := x0.AuxInt 29603 if x0.Aux != s { 29604 break 29605 } 29606 _ = x0.Args[1] 29607 if p != x0.Args[0] { 29608 break 29609 } 29610 if mem != x0.Args[1] { 29611 break 29612 } 29613 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29614 break 29615 } 29616 b = mergePoint(b, x0, x1) 29617 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29618 v.reset(OpCopy) 29619 v.AddArg(v0) 29620 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29621 v1.AuxInt = j1 29622 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29623 v2.AuxInt = 8 29624 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29625 v3.AuxInt = i0 29626 v3.Aux = s 29627 v3.AddArg(p) 29628 v3.AddArg(mem) 29629 v2.AddArg(v3) 29630 v1.AddArg(v2) 29631 v0.AddArg(v1) 29632 v0.AddArg(y) 29633 return true 29634 } 29635 return false 29636 } 29637 func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool { 29638 b := v.Block 29639 _ = b 29640 typ := &b.Func.Config.Types 29641 _ = typ 29642 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 29643 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(s0) && clobber(s1) && clobber(or) 29644 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29645 for { 29646 _ = v.Args[1] 29647 or := v.Args[0] 29648 if or.Op != OpAMD64ORQ { 29649 break 29650 } 29651 _ = or.Args[1] 29652 y := or.Args[0] 29653 s1 := or.Args[1] 29654 if s1.Op != OpAMD64SHLQconst { 29655 break 29656 } 29657 j1 := s1.AuxInt 29658 x1 := s1.Args[0] 29659 if x1.Op != OpAMD64MOVBload { 29660 break 29661 } 29662 i1 := x1.AuxInt 29663 s := x1.Aux 29664 _ = x1.Args[1] 29665 p := x1.Args[0] 29666 mem := x1.Args[1] 29667 s0 := v.Args[1] 29668 if s0.Op != OpAMD64SHLQconst { 29669 break 29670 } 29671 j0 := s0.AuxInt 29672 x0 := s0.Args[0] 29673 if x0.Op != OpAMD64MOVBload { 29674 break 29675 } 29676 i0 := x0.AuxInt 29677 if x0.Aux != s { 29678 break 29679 } 29680 _ = x0.Args[1] 29681 if p != x0.Args[0] { 29682 break 29683 } 29684 if mem != x0.Args[1] { 29685 break 29686 } 29687 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29688 break 29689 } 29690 b = mergePoint(b, x0, x1) 29691 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29692 v.reset(OpCopy) 29693 v.AddArg(v0) 29694 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29695 v1.AuxInt = j1 29696 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29697 v2.AuxInt = 8 29698 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29699 v3.AuxInt = i0 29700 v3.Aux = s 29701 v3.AddArg(p) 29702 v3.AddArg(mem) 29703 v2.AddArg(v3) 29704 v1.AddArg(v2) 29705 v0.AddArg(v1) 29706 v0.AddArg(y) 29707 return true 29708 } 29709 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 29710 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29711 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29712 for { 29713 _ = v.Args[1] 29714 s0 := v.Args[0] 29715 if s0.Op != OpAMD64SHLQconst { 29716 break 29717 } 29718 j0 := s0.AuxInt 29719 r0 := s0.Args[0] 29720 if r0.Op != OpAMD64ROLWconst { 29721 break 29722 } 29723 if r0.AuxInt != 8 { 29724 break 29725 } 29726 x0 := r0.Args[0] 29727 if x0.Op != OpAMD64MOVWload { 29728 break 29729 } 29730 i0 := x0.AuxInt 29731 s := x0.Aux 29732 _ = x0.Args[1] 29733 p := x0.Args[0] 29734 mem := x0.Args[1] 29735 or := v.Args[1] 29736 if or.Op != OpAMD64ORQ { 29737 break 29738 } 29739 _ = or.Args[1] 29740 s1 := or.Args[0] 29741 if s1.Op != OpAMD64SHLQconst { 29742 break 29743 } 29744 j1 := s1.AuxInt 29745 r1 := s1.Args[0] 29746 if r1.Op != OpAMD64ROLWconst { 29747 break 29748 } 29749 if r1.AuxInt != 8 { 29750 break 29751 } 29752 x1 := r1.Args[0] 29753 if x1.Op != OpAMD64MOVWload { 29754 break 29755 } 29756 i1 := x1.AuxInt 29757 if x1.Aux != s { 29758 break 29759 } 29760 _ = x1.Args[1] 29761 if p != x1.Args[0] { 29762 break 29763 } 29764 if mem != x1.Args[1] { 29765 break 29766 } 29767 y := or.Args[1] 29768 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && 
s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29769 break 29770 } 29771 b = mergePoint(b, x0, x1) 29772 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29773 v.reset(OpCopy) 29774 v.AddArg(v0) 29775 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29776 v1.AuxInt = j1 29777 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29778 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29779 v3.AuxInt = i0 29780 v3.Aux = s 29781 v3.AddArg(p) 29782 v3.AddArg(mem) 29783 v2.AddArg(v3) 29784 v1.AddArg(v2) 29785 v0.AddArg(v1) 29786 v0.AddArg(y) 29787 return true 29788 } 29789 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) 29790 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29791 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29792 for { 29793 _ = v.Args[1] 29794 s0 := v.Args[0] 29795 if s0.Op != OpAMD64SHLQconst { 29796 break 29797 } 29798 j0 := s0.AuxInt 29799 r0 := s0.Args[0] 29800 if r0.Op != OpAMD64ROLWconst { 29801 break 29802 } 29803 if r0.AuxInt != 8 { 29804 break 29805 } 29806 x0 := r0.Args[0] 29807 if x0.Op != OpAMD64MOVWload { 29808 break 29809 } 29810 i0 := x0.AuxInt 29811 s := x0.Aux 29812 _ = x0.Args[1] 29813 p := x0.Args[0] 29814 mem := x0.Args[1] 29815 or := v.Args[1] 29816 if or.Op != OpAMD64ORQ { 29817 break 29818 } 29819 _ = or.Args[1] 29820 y := or.Args[0] 29821 s1 := or.Args[1] 29822 if s1.Op != OpAMD64SHLQconst { 29823 break 29824 } 29825 j1 := s1.AuxInt 29826 r1 := s1.Args[0] 29827 if r1.Op != OpAMD64ROLWconst { 29828 break 29829 } 29830 if r1.AuxInt != 8 { 29831 break 29832 } 29833 x1 := r1.Args[0] 29834 if x1.Op != OpAMD64MOVWload { 29835 break 29836 } 29837 i1 := x1.AuxInt 29838 if x1.Aux != s { 29839 break 29840 } 29841 _ = x1.Args[1] 29842 if p != x1.Args[0] { 29843 break 29844 } 29845 if mem != x1.Args[1] { 29846 break 29847 } 29848 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29849 break 29850 } 29851 b = mergePoint(b, x0, x1) 29852 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29853 v.reset(OpCopy) 29854 v.AddArg(v0) 29855 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29856 v1.AuxInt = j1 29857 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29858 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29859 v3.AuxInt = i0 29860 v3.Aux = s 29861 v3.AddArg(p) 29862 v3.AddArg(mem) 29863 v2.AddArg(v3) 29864 v1.AddArg(v2) 29865 v0.AddArg(v1) 29866 v0.AddArg(y) 29867 return true 29868 } 29869 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 29870 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) 
&& clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29871 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29872 for { 29873 _ = v.Args[1] 29874 or := v.Args[0] 29875 if or.Op != OpAMD64ORQ { 29876 break 29877 } 29878 _ = or.Args[1] 29879 s1 := or.Args[0] 29880 if s1.Op != OpAMD64SHLQconst { 29881 break 29882 } 29883 j1 := s1.AuxInt 29884 r1 := s1.Args[0] 29885 if r1.Op != OpAMD64ROLWconst { 29886 break 29887 } 29888 if r1.AuxInt != 8 { 29889 break 29890 } 29891 x1 := r1.Args[0] 29892 if x1.Op != OpAMD64MOVWload { 29893 break 29894 } 29895 i1 := x1.AuxInt 29896 s := x1.Aux 29897 _ = x1.Args[1] 29898 p := x1.Args[0] 29899 mem := x1.Args[1] 29900 y := or.Args[1] 29901 s0 := v.Args[1] 29902 if s0.Op != OpAMD64SHLQconst { 29903 break 29904 } 29905 j0 := s0.AuxInt 29906 r0 := s0.Args[0] 29907 if r0.Op != OpAMD64ROLWconst { 29908 break 29909 } 29910 if r0.AuxInt != 8 { 29911 break 29912 } 29913 x0 := r0.Args[0] 29914 if x0.Op != OpAMD64MOVWload { 29915 break 29916 } 29917 i0 := x0.AuxInt 29918 if x0.Aux != s { 29919 break 29920 } 29921 _ = x0.Args[1] 29922 if p != x0.Args[0] { 29923 break 29924 } 29925 if mem != x0.Args[1] { 29926 break 29927 } 29928 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29929 break 29930 } 29931 b = mergePoint(b, x0, x1) 29932 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29933 v.reset(OpCopy) 29934 v.AddArg(v0) 29935 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29936 v1.AuxInt = j1 29937 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29938 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29939 v3.AuxInt = i0 29940 v3.Aux = s 29941 v3.AddArg(p) 29942 v3.AddArg(mem) 29943 v2.AddArg(v3) 29944 v1.AddArg(v2) 29945 v0.AddArg(v1) 29946 v0.AddArg(y) 29947 return true 29948 } 29949 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 29950 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29951 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29952 for { 29953 _ = v.Args[1] 29954 or := v.Args[0] 29955 if or.Op != OpAMD64ORQ { 29956 break 29957 } 29958 _ = or.Args[1] 29959 y := or.Args[0] 29960 s1 := or.Args[1] 29961 if s1.Op != OpAMD64SHLQconst { 29962 break 29963 } 29964 j1 := s1.AuxInt 29965 r1 := s1.Args[0] 29966 if r1.Op != OpAMD64ROLWconst { 29967 break 29968 } 29969 if r1.AuxInt != 8 { 29970 break 29971 } 29972 x1 := r1.Args[0] 29973 if x1.Op != OpAMD64MOVWload { 29974 break 29975 } 29976 i1 := x1.AuxInt 29977 s := x1.Aux 29978 _ = x1.Args[1] 29979 p := x1.Args[0] 29980 mem := x1.Args[1] 29981 s0 := v.Args[1] 29982 if s0.Op != OpAMD64SHLQconst { 29983 break 29984 } 29985 j0 := s0.AuxInt 29986 r0 := s0.Args[0] 29987 if r0.Op != OpAMD64ROLWconst { 29988 break 29989 } 29990 if r0.AuxInt != 8 { 29991 break 29992 } 29993 x0 := r0.Args[0] 29994 if x0.Op != OpAMD64MOVWload { 29995 
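// The value under this ROLWconst is not a plain 16-bit load, so this
// byte-swapped merge cannot apply.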
break 29996 } 29997 i0 := x0.AuxInt 29998 if x0.Aux != s { 29999 break 30000 } 30001 _ = x0.Args[1] 30002 if p != x0.Args[0] { 30003 break 30004 } 30005 if mem != x0.Args[1] { 30006 break 30007 } 30008 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 30009 break 30010 } 30011 b = mergePoint(b, x0, x1) 30012 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 30013 v.reset(OpCopy) 30014 v.AddArg(v0) 30015 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 30016 v1.AuxInt = j1 30017 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 30018 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 30019 v3.AuxInt = i0 30020 v3.Aux = s 30021 v3.AddArg(p) 30022 v3.AddArg(mem) 30023 v2.AddArg(v3) 30024 v1.AddArg(v2) 30025 v0.AddArg(v1) 30026 v0.AddArg(y) 30027 return true 30028 } 30029 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30030 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30031 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30032 for { 30033 _ = v.Args[1] 30034 x1 := v.Args[0] 30035 if x1.Op != OpAMD64MOVBloadidx1 { 30036 break 30037 } 30038 i1 := x1.AuxInt 30039 s := x1.Aux 30040 _ = x1.Args[2] 30041 p := x1.Args[0] 30042 idx := x1.Args[1] 30043 mem := x1.Args[2] 30044 sh := v.Args[1] 30045 if sh.Op != OpAMD64SHLQconst { 30046 break 30047 } 30048 if sh.AuxInt != 8 { 30049 break 30050 } 30051 x0 := sh.Args[0] 30052 if x0.Op != OpAMD64MOVBloadidx1 { 30053 break 30054 } 30055 i0 := x0.AuxInt 30056 if x0.Aux != s { 30057 break 30058 } 30059 _ = x0.Args[2] 30060 if p != x0.Args[0] { 30061 break 30062 } 30063 if idx != x0.Args[1] { 30064 break 30065 } 30066 if mem != x0.Args[2] { 30067 break 30068 } 30069 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30070 break 30071 } 30072 b = mergePoint(b, x0, x1) 30073 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30074 v.reset(OpCopy) 30075 v.AddArg(v0) 30076 v0.AuxInt = 8 30077 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30078 v1.AuxInt = i0 30079 v1.Aux = s 30080 v1.AddArg(p) 30081 v1.AddArg(idx) 30082 v1.AddArg(mem) 30083 v0.AddArg(v1) 30084 return true 30085 } 30086 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30087 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30088 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30089 for { 30090 _ = v.Args[1] 30091 x1 := v.Args[0] 30092 if x1.Op != OpAMD64MOVBloadidx1 { 30093 break 30094 } 30095 i1 := x1.AuxInt 30096 s := x1.Aux 30097 _ = x1.Args[2] 30098 idx := x1.Args[0] 30099 p := x1.Args[1] 30100 mem := x1.Args[2] 30101 sh := v.Args[1] 30102 if sh.Op != OpAMD64SHLQconst { 30103 break 30104 } 30105 if sh.AuxInt != 8 { 30106 break 30107 } 30108 x0 := sh.Args[0] 30109 if x0.Op != OpAMD64MOVBloadidx1 { 30110 break 30111 } 30112 i0 := x0.AuxInt 30113 if x0.Aux != s { 30114 break 30115 } 30116 _ = x0.Args[2] 30117 if p != x0.Args[0] { 30118 break 30119 } 30120 if 
idx != x0.Args[1] { 30121 break 30122 } 30123 if mem != x0.Args[2] { 30124 break 30125 } 30126 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30127 break 30128 } 30129 b = mergePoint(b, x0, x1) 30130 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30131 v.reset(OpCopy) 30132 v.AddArg(v0) 30133 v0.AuxInt = 8 30134 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30135 v1.AuxInt = i0 30136 v1.Aux = s 30137 v1.AddArg(p) 30138 v1.AddArg(idx) 30139 v1.AddArg(mem) 30140 v0.AddArg(v1) 30141 return true 30142 } 30143 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30144 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30145 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30146 for { 30147 _ = v.Args[1] 30148 x1 := v.Args[0] 30149 if x1.Op != OpAMD64MOVBloadidx1 { 30150 break 30151 } 30152 i1 := x1.AuxInt 30153 s := x1.Aux 30154 _ = x1.Args[2] 30155 p := x1.Args[0] 30156 idx := x1.Args[1] 30157 mem := x1.Args[2] 30158 sh := v.Args[1] 30159 if sh.Op != OpAMD64SHLQconst { 30160 break 30161 } 30162 if sh.AuxInt != 8 { 30163 break 30164 } 30165 x0 := sh.Args[0] 30166 if x0.Op != OpAMD64MOVBloadidx1 { 30167 break 30168 } 30169 i0 := x0.AuxInt 30170 if x0.Aux != s { 30171 break 30172 } 30173 _ = x0.Args[2] 30174 if idx != x0.Args[0] { 30175 break 30176 } 30177 if p != x0.Args[1] { 30178 break 30179 } 30180 if mem != x0.Args[2] { 30181 break 30182 } 30183 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30184 break 30185 } 30186 b = mergePoint(b, x0, x1) 30187 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30188 v.reset(OpCopy) 30189 v.AddArg(v0) 30190 v0.AuxInt = 8 30191 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30192 v1.AuxInt = i0 30193 v1.Aux = s 30194 v1.AddArg(p) 30195 v1.AddArg(idx) 30196 v1.AddArg(mem) 30197 v0.AddArg(v1) 30198 return true 30199 } 30200 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30201 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30202 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30203 for { 30204 _ = v.Args[1] 30205 x1 := v.Args[0] 30206 if x1.Op != OpAMD64MOVBloadidx1 { 30207 break 30208 } 30209 i1 := x1.AuxInt 30210 s := x1.Aux 30211 _ = x1.Args[2] 30212 idx := x1.Args[0] 30213 p := x1.Args[1] 30214 mem := x1.Args[2] 30215 sh := v.Args[1] 30216 if sh.Op != OpAMD64SHLQconst { 30217 break 30218 } 30219 if sh.AuxInt != 8 { 30220 break 30221 } 30222 x0 := sh.Args[0] 30223 if x0.Op != OpAMD64MOVBloadidx1 { 30224 break 30225 } 30226 i0 := x0.AuxInt 30227 if x0.Aux != s { 30228 break 30229 } 30230 _ = x0.Args[2] 30231 if idx != x0.Args[0] { 30232 break 30233 } 30234 if p != x0.Args[1] { 30235 break 30236 } 30237 if mem != x0.Args[2] { 30238 break 30239 } 30240 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30241 break 30242 } 30243 b = mergePoint(b, x0, x1) 30244 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30245 v.reset(OpCopy) 30246 v.AddArg(v0) 30247 v0.AuxInt = 
8 30248 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30249 v1.AuxInt = i0 30250 v1.Aux = s 30251 v1.AddArg(p) 30252 v1.AddArg(idx) 30253 v1.AddArg(mem) 30254 v0.AddArg(v1) 30255 return true 30256 } 30257 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 30258 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30259 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30260 for { 30261 _ = v.Args[1] 30262 sh := v.Args[0] 30263 if sh.Op != OpAMD64SHLQconst { 30264 break 30265 } 30266 if sh.AuxInt != 8 { 30267 break 30268 } 30269 x0 := sh.Args[0] 30270 if x0.Op != OpAMD64MOVBloadidx1 { 30271 break 30272 } 30273 i0 := x0.AuxInt 30274 s := x0.Aux 30275 _ = x0.Args[2] 30276 p := x0.Args[0] 30277 idx := x0.Args[1] 30278 mem := x0.Args[2] 30279 x1 := v.Args[1] 30280 if x1.Op != OpAMD64MOVBloadidx1 { 30281 break 30282 } 30283 i1 := x1.AuxInt 30284 if x1.Aux != s { 30285 break 30286 } 30287 _ = x1.Args[2] 30288 if p != x1.Args[0] { 30289 break 30290 } 30291 if idx != x1.Args[1] { 30292 break 30293 } 30294 if mem != x1.Args[2] { 30295 break 30296 } 30297 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30298 break 30299 } 30300 b = mergePoint(b, x0, x1) 30301 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30302 v.reset(OpCopy) 30303 v.AddArg(v0) 30304 v0.AuxInt = 8 30305 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30306 v1.AuxInt = i0 30307 v1.Aux = s 30308 v1.AddArg(p) 30309 v1.AddArg(idx) 30310 v1.AddArg(mem) 30311 v0.AddArg(v1) 30312 return true 30313 } 30314 return false 30315 } 30316 func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool { 30317 b := v.Block 30318 _ = b 30319 typ := &b.Func.Config.Types 30320 _ = typ 30321 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 30322 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30323 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30324 for { 30325 _ = v.Args[1] 30326 sh := v.Args[0] 30327 if sh.Op != OpAMD64SHLQconst { 30328 break 30329 } 30330 if sh.AuxInt != 8 { 30331 break 30332 } 30333 x0 := sh.Args[0] 30334 if x0.Op != OpAMD64MOVBloadidx1 { 30335 break 30336 } 30337 i0 := x0.AuxInt 30338 s := x0.Aux 30339 _ = x0.Args[2] 30340 idx := x0.Args[0] 30341 p := x0.Args[1] 30342 mem := x0.Args[2] 30343 x1 := v.Args[1] 30344 if x1.Op != OpAMD64MOVBloadidx1 { 30345 break 30346 } 30347 i1 := x1.AuxInt 30348 if x1.Aux != s { 30349 break 30350 } 30351 _ = x1.Args[2] 30352 if p != x1.Args[0] { 30353 break 30354 } 30355 if idx != x1.Args[1] { 30356 break 30357 } 30358 if mem != x1.Args[2] { 30359 break 30360 } 30361 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30362 break 30363 } 30364 b = mergePoint(b, x0, x1) 30365 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30366 v.reset(OpCopy) 30367 v.AddArg(v0) 30368 v0.AuxInt = 8 30369 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30370 v1.AuxInt = i0 30371 v1.Aux = s 30372 v1.AddArg(p) 30373 v1.AddArg(idx) 30374 v1.AddArg(mem) 30375 v0.AddArg(v1) 30376 return true 30377 } 30378 // match: (ORQ 
sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 30379 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30380 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30381 for { 30382 _ = v.Args[1] 30383 sh := v.Args[0] 30384 if sh.Op != OpAMD64SHLQconst { 30385 break 30386 } 30387 if sh.AuxInt != 8 { 30388 break 30389 } 30390 x0 := sh.Args[0] 30391 if x0.Op != OpAMD64MOVBloadidx1 { 30392 break 30393 } 30394 i0 := x0.AuxInt 30395 s := x0.Aux 30396 _ = x0.Args[2] 30397 p := x0.Args[0] 30398 idx := x0.Args[1] 30399 mem := x0.Args[2] 30400 x1 := v.Args[1] 30401 if x1.Op != OpAMD64MOVBloadidx1 { 30402 break 30403 } 30404 i1 := x1.AuxInt 30405 if x1.Aux != s { 30406 break 30407 } 30408 _ = x1.Args[2] 30409 if idx != x1.Args[0] { 30410 break 30411 } 30412 if p != x1.Args[1] { 30413 break 30414 } 30415 if mem != x1.Args[2] { 30416 break 30417 } 30418 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30419 break 30420 } 30421 b = mergePoint(b, x0, x1) 30422 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30423 v.reset(OpCopy) 30424 v.AddArg(v0) 30425 v0.AuxInt = 8 30426 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30427 v1.AuxInt = i0 30428 v1.Aux = s 30429 v1.AddArg(p) 30430 v1.AddArg(idx) 30431 v1.AddArg(mem) 30432 v0.AddArg(v1) 30433 return true 30434 } 30435 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) 30436 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30437 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30438 for { 30439 _ = v.Args[1] 30440 sh := v.Args[0] 30441 if sh.Op != OpAMD64SHLQconst { 30442 break 30443 } 30444 if sh.AuxInt != 8 { 30445 break 30446 } 30447 x0 := sh.Args[0] 30448 if x0.Op != OpAMD64MOVBloadidx1 { 30449 break 30450 } 30451 i0 := x0.AuxInt 30452 s := x0.Aux 30453 _ = x0.Args[2] 30454 idx := x0.Args[0] 30455 p := x0.Args[1] 30456 mem := x0.Args[2] 30457 x1 := v.Args[1] 30458 if x1.Op != OpAMD64MOVBloadidx1 { 30459 break 30460 } 30461 i1 := x1.AuxInt 30462 if x1.Aux != s { 30463 break 30464 } 30465 _ = x1.Args[2] 30466 if idx != x1.Args[0] { 30467 break 30468 } 30469 if p != x1.Args[1] { 30470 break 30471 } 30472 if mem != x1.Args[2] { 30473 break 30474 } 30475 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30476 break 30477 } 30478 b = mergePoint(b, x0, x1) 30479 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30480 v.reset(OpCopy) 30481 v.AddArg(v0) 30482 v0.AuxInt = 8 30483 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30484 v1.AuxInt = i0 30485 v1.Aux = s 30486 v1.AddArg(p) 30487 v1.AddArg(idx) 30488 v1.AddArg(mem) 30489 v0.AddArg(v1) 30490 return true 30491 } 30492 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 30493 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30494 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> 
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
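// ORQ has more rewrite rules than fit comfortably in one function, so the
// generator splits them into chunks of roughly ten rules each
// (rewriteValueAMD64_OpAMD64ORQ_110, _120, and so on); the top-level
// dispatcher tries each chunk in turn until one fires.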
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
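	// The BSWAPQ rules below complete the ladder at 64 bits: two 32-bit
	// byte-swapped loads at offsets i0 and i0+4, with the lower-addressed one
	// shifted left 32, merge into one 64-bit load plus BSWAPQ. A sketch of
	// the source-level pattern (hypothetical helper, reusing the readBE32
	// shape from the earlier note):
	//
	//	func readBE64(b []byte, i int) uint64 {
	//		return uint64(readBE32(b, i))<<32 | uint64(readBE32(b, i+4))
	//	}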
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
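	// The remaining ORQ rules handle partial merges inside a larger OR tree:
	// rather than building the whole value, they find two loads whose shift
	// amounts differ by exactly the load width (j1 == j0-8 for bytes) and
	// whose offsets are adjacent (i1 == i0+1), fold them into one wider
	// byte-swapped load shifted by j1, and re-attach the rest of the tree y.
	// The j1 % 16 == 0 condition keeps the merged word aligned within the
	// final 64-bit value, and @mergePoint(b,x0,x1) places the new value in a
	// block where both original loads are available.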
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if p != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		p, idx, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0, s := x0.AuxInt, x0.Aux
		idx, p, mem := x0.Args[0], x0.Args[1], x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 || x1.Aux != s {
			break
		}
		i1 := x1.AuxInt
		if idx != x1.Args[0] || p != x1.Args[1] || mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
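	// From here on, the same partial merges are matched with the operands of
	// the outer ORQ in the opposite order: the inner OR tree or comes first
	// and the shifted byte load s0 second. The rewrite result is identical.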
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
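// clobber (defined alongside these rules in rewrite.go) resets a matched
// value to OpInvalid and always returns true, which is why it can be chained
// into the cond expressions above: together with the Uses == 1 checks it
// guarantees the old loads and shifts really die once they are replaced.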
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		idx, p, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if p != x0.Args[0] || idx != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1, s := x1.AuxInt, x1.Aux
		p, idx, mem := x1.Args[0], x1.Args[1], x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 || x0.Aux != s {
			break
		}
		i0 := x0.AuxInt
		if idx != x0.Args[0] || p != x0.Args[1] || mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
x1.Args[0] 32544 p := x1.Args[1] 32545 mem := x1.Args[2] 32546 y := or.Args[1] 32547 s0 := v.Args[1] 32548 if s0.Op != OpAMD64SHLQconst { 32549 break 32550 } 32551 j0 := s0.AuxInt 32552 x0 := s0.Args[0] 32553 if x0.Op != OpAMD64MOVBloadidx1 { 32554 break 32555 } 32556 i0 := x0.AuxInt 32557 if x0.Aux != s { 32558 break 32559 } 32560 _ = x0.Args[2] 32561 if idx != x0.Args[0] { 32562 break 32563 } 32564 if p != x0.Args[1] { 32565 break 32566 } 32567 if mem != x0.Args[2] { 32568 break 32569 } 32570 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32571 break 32572 } 32573 b = mergePoint(b, x0, x1) 32574 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32575 v.reset(OpCopy) 32576 v.AddArg(v0) 32577 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32578 v1.AuxInt = j1 32579 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32580 v2.AuxInt = 8 32581 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32582 v3.AuxInt = i0 32583 v3.Aux = s 32584 v3.AddArg(p) 32585 v3.AddArg(idx) 32586 v3.AddArg(mem) 32587 v2.AddArg(v3) 32588 v1.AddArg(v2) 32589 v0.AddArg(v1) 32590 v0.AddArg(y) 32591 return true 32592 } 32593 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 32594 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32595 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32596 for { 32597 _ = v.Args[1] 32598 or := v.Args[0] 32599 if or.Op != OpAMD64ORQ { 32600 break 32601 } 32602 _ = or.Args[1] 32603 y := or.Args[0] 32604 s1 := or.Args[1] 32605 if s1.Op != OpAMD64SHLQconst { 32606 break 32607 } 32608 j1 := s1.AuxInt 32609 x1 := s1.Args[0] 32610 if x1.Op != OpAMD64MOVBloadidx1 { 32611 break 32612 } 32613 i1 := x1.AuxInt 32614 s := x1.Aux 32615 _ = x1.Args[2] 32616 p := x1.Args[0] 32617 idx := x1.Args[1] 32618 mem := x1.Args[2] 32619 s0 := v.Args[1] 32620 if s0.Op != OpAMD64SHLQconst { 32621 break 32622 } 32623 j0 := s0.AuxInt 32624 x0 := s0.Args[0] 32625 if x0.Op != OpAMD64MOVBloadidx1 { 32626 break 32627 } 32628 i0 := x0.AuxInt 32629 if x0.Aux != s { 32630 break 32631 } 32632 _ = x0.Args[2] 32633 if idx != x0.Args[0] { 32634 break 32635 } 32636 if p != x0.Args[1] { 32637 break 32638 } 32639 if mem != x0.Args[2] { 32640 break 32641 } 32642 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32643 break 32644 } 32645 b = mergePoint(b, x0, x1) 32646 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32647 v.reset(OpCopy) 32648 v.AddArg(v0) 32649 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32650 v1.AuxInt = j1 32651 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32652 v2.AuxInt = 8 32653 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32654 v3.AuxInt = i0 32655 v3.Aux = s 32656 v3.AddArg(p) 32657 v3.AddArg(idx) 32658 v3.AddArg(mem) 32659 v2.AddArg(v3) 32660 v1.AddArg(v2) 32661 v0.AddArg(v1) 32662 v0.AddArg(y) 32663 return true 32664 } 32665 // match: (ORQ 
or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 32666 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32667 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32668 for { 32669 _ = v.Args[1] 32670 or := v.Args[0] 32671 if or.Op != OpAMD64ORQ { 32672 break 32673 } 32674 _ = or.Args[1] 32675 y := or.Args[0] 32676 s1 := or.Args[1] 32677 if s1.Op != OpAMD64SHLQconst { 32678 break 32679 } 32680 j1 := s1.AuxInt 32681 x1 := s1.Args[0] 32682 if x1.Op != OpAMD64MOVBloadidx1 { 32683 break 32684 } 32685 i1 := x1.AuxInt 32686 s := x1.Aux 32687 _ = x1.Args[2] 32688 idx := x1.Args[0] 32689 p := x1.Args[1] 32690 mem := x1.Args[2] 32691 s0 := v.Args[1] 32692 if s0.Op != OpAMD64SHLQconst { 32693 break 32694 } 32695 j0 := s0.AuxInt 32696 x0 := s0.Args[0] 32697 if x0.Op != OpAMD64MOVBloadidx1 { 32698 break 32699 } 32700 i0 := x0.AuxInt 32701 if x0.Aux != s { 32702 break 32703 } 32704 _ = x0.Args[2] 32705 if idx != x0.Args[0] { 32706 break 32707 } 32708 if p != x0.Args[1] { 32709 break 32710 } 32711 if mem != x0.Args[2] { 32712 break 32713 } 32714 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32715 break 32716 } 32717 b = mergePoint(b, x0, x1) 32718 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32719 v.reset(OpCopy) 32720 v.AddArg(v0) 32721 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32722 v1.AuxInt = j1 32723 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32724 v2.AuxInt = 8 32725 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32726 v3.AuxInt = i0 32727 v3.Aux = s 32728 v3.AddArg(p) 32729 v3.AddArg(idx) 32730 v3.AddArg(mem) 32731 v2.AddArg(v3) 32732 v1.AddArg(v2) 32733 v0.AddArg(v1) 32734 v0.AddArg(y) 32735 return true 32736 } 32737 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 32738 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32739 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32740 for { 32741 _ = v.Args[1] 32742 s0 := v.Args[0] 32743 if s0.Op != OpAMD64SHLQconst { 32744 break 32745 } 32746 j0 := s0.AuxInt 32747 r0 := s0.Args[0] 32748 if r0.Op != OpAMD64ROLWconst { 32749 break 32750 } 32751 if r0.AuxInt != 8 { 32752 break 32753 } 32754 x0 := r0.Args[0] 32755 if x0.Op != OpAMD64MOVWloadidx1 { 32756 break 32757 } 32758 i0 := x0.AuxInt 32759 s := x0.Aux 32760 _ = x0.Args[2] 32761 p := x0.Args[0] 32762 idx := x0.Args[1] 32763 mem := x0.Args[2] 32764 or := v.Args[1] 32765 if or.Op != OpAMD64ORQ { 32766 break 32767 } 32768 _ = or.Args[1] 32769 s1 := or.Args[0] 32770 if s1.Op != OpAMD64SHLQconst { 32771 break 32772 } 32773 j1 := s1.AuxInt 32774 r1 := s1.Args[0] 32775 if r1.Op != OpAMD64ROLWconst 
{ 32776 break 32777 } 32778 if r1.AuxInt != 8 { 32779 break 32780 } 32781 x1 := r1.Args[0] 32782 if x1.Op != OpAMD64MOVWloadidx1 { 32783 break 32784 } 32785 i1 := x1.AuxInt 32786 if x1.Aux != s { 32787 break 32788 } 32789 _ = x1.Args[2] 32790 if p != x1.Args[0] { 32791 break 32792 } 32793 if idx != x1.Args[1] { 32794 break 32795 } 32796 if mem != x1.Args[2] { 32797 break 32798 } 32799 y := or.Args[1] 32800 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32801 break 32802 } 32803 b = mergePoint(b, x0, x1) 32804 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32805 v.reset(OpCopy) 32806 v.AddArg(v0) 32807 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32808 v1.AuxInt = j1 32809 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32810 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32811 v3.AuxInt = i0 32812 v3.Aux = s 32813 v3.AddArg(p) 32814 v3.AddArg(idx) 32815 v3.AddArg(mem) 32816 v2.AddArg(v3) 32817 v1.AddArg(v2) 32818 v0.AddArg(v1) 32819 v0.AddArg(y) 32820 return true 32821 } 32822 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 32823 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32824 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32825 for { 32826 _ = v.Args[1] 32827 s0 := v.Args[0] 32828 if s0.Op != OpAMD64SHLQconst { 32829 break 32830 } 32831 j0 := s0.AuxInt 32832 r0 := s0.Args[0] 32833 if r0.Op != OpAMD64ROLWconst { 32834 break 32835 } 32836 if r0.AuxInt != 8 { 32837 break 32838 } 32839 x0 := r0.Args[0] 32840 if x0.Op != OpAMD64MOVWloadidx1 { 32841 break 32842 } 32843 i0 := x0.AuxInt 32844 s := x0.Aux 32845 _ = x0.Args[2] 32846 idx := x0.Args[0] 32847 p := x0.Args[1] 32848 mem := x0.Args[2] 32849 or := v.Args[1] 32850 if or.Op != OpAMD64ORQ { 32851 break 32852 } 32853 _ = or.Args[1] 32854 s1 := or.Args[0] 32855 if s1.Op != OpAMD64SHLQconst { 32856 break 32857 } 32858 j1 := s1.AuxInt 32859 r1 := s1.Args[0] 32860 if r1.Op != OpAMD64ROLWconst { 32861 break 32862 } 32863 if r1.AuxInt != 8 { 32864 break 32865 } 32866 x1 := r1.Args[0] 32867 if x1.Op != OpAMD64MOVWloadidx1 { 32868 break 32869 } 32870 i1 := x1.AuxInt 32871 if x1.Aux != s { 32872 break 32873 } 32874 _ = x1.Args[2] 32875 if p != x1.Args[0] { 32876 break 32877 } 32878 if idx != x1.Args[1] { 32879 break 32880 } 32881 if mem != x1.Args[2] { 32882 break 32883 } 32884 y := or.Args[1] 32885 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32886 break 32887 } 32888 b = mergePoint(b, x0, x1) 32889 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32890 v.reset(OpCopy) 32891 v.AddArg(v0) 32892 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32893 v1.AuxInt = j1 32894 v2 := 
b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32895 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32896 v3.AuxInt = i0 32897 v3.Aux = s 32898 v3.AddArg(p) 32899 v3.AddArg(idx) 32900 v3.AddArg(mem) 32901 v2.AddArg(v3) 32902 v1.AddArg(v2) 32903 v0.AddArg(v1) 32904 v0.AddArg(y) 32905 return true 32906 } 32907 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 32908 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32909 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32910 for { 32911 _ = v.Args[1] 32912 s0 := v.Args[0] 32913 if s0.Op != OpAMD64SHLQconst { 32914 break 32915 } 32916 j0 := s0.AuxInt 32917 r0 := s0.Args[0] 32918 if r0.Op != OpAMD64ROLWconst { 32919 break 32920 } 32921 if r0.AuxInt != 8 { 32922 break 32923 } 32924 x0 := r0.Args[0] 32925 if x0.Op != OpAMD64MOVWloadidx1 { 32926 break 32927 } 32928 i0 := x0.AuxInt 32929 s := x0.Aux 32930 _ = x0.Args[2] 32931 p := x0.Args[0] 32932 idx := x0.Args[1] 32933 mem := x0.Args[2] 32934 or := v.Args[1] 32935 if or.Op != OpAMD64ORQ { 32936 break 32937 } 32938 _ = or.Args[1] 32939 s1 := or.Args[0] 32940 if s1.Op != OpAMD64SHLQconst { 32941 break 32942 } 32943 j1 := s1.AuxInt 32944 r1 := s1.Args[0] 32945 if r1.Op != OpAMD64ROLWconst { 32946 break 32947 } 32948 if r1.AuxInt != 8 { 32949 break 32950 } 32951 x1 := r1.Args[0] 32952 if x1.Op != OpAMD64MOVWloadidx1 { 32953 break 32954 } 32955 i1 := x1.AuxInt 32956 if x1.Aux != s { 32957 break 32958 } 32959 _ = x1.Args[2] 32960 if idx != x1.Args[0] { 32961 break 32962 } 32963 if p != x1.Args[1] { 32964 break 32965 } 32966 if mem != x1.Args[2] { 32967 break 32968 } 32969 y := or.Args[1] 32970 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32971 break 32972 } 32973 b = mergePoint(b, x0, x1) 32974 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32975 v.reset(OpCopy) 32976 v.AddArg(v0) 32977 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32978 v1.AuxInt = j1 32979 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32980 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32981 v3.AuxInt = i0 32982 v3.Aux = s 32983 v3.AddArg(p) 32984 v3.AddArg(idx) 32985 v3.AddArg(mem) 32986 v2.AddArg(v3) 32987 v1.AddArg(v2) 32988 v0.AddArg(v1) 32989 v0.AddArg(y) 32990 return true 32991 } 32992 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 32993 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32994 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32995 
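	// The long run of (ORQ ...) permutation rules above and below all target the
	// same shape: two byte-swapped 16-bit loads from adjacent offsets (i0 and
	// i0+2), shifted into neighboring 16-bit lanes (j1 and j0 == j1+16) and ORed
	// together, fold into a single byte-swapped 32-bit load. A sketch of the
	// identity being exploited, over a plain byte slice b with the stock
	// encoding/binary and math/bits APIs (an illustration only, not the matcher):
	//
	//	hi := bits.ReverseBytes16(binary.LittleEndian.Uint16(b[i0:]))   // (ROLWconst [8] (MOVWloadidx1 [i0]))
	//	lo := bits.ReverseBytes16(binary.LittleEndian.Uint16(b[i0+2:])) // (ROLWconst [8] (MOVWloadidx1 [i0+2]))
	//	merged := uint32(hi)<<16 | uint32(lo)
	//	// merged == bits.ReverseBytes32(binary.LittleEndian.Uint32(b[i0:])),
	//	// i.e. the (BSWAPL (MOVLloadidx1 [i0])) in the result, before the final SHLQconst [j1].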
for { 32996 _ = v.Args[1] 32997 s0 := v.Args[0] 32998 if s0.Op != OpAMD64SHLQconst { 32999 break 33000 } 33001 j0 := s0.AuxInt 33002 r0 := s0.Args[0] 33003 if r0.Op != OpAMD64ROLWconst { 33004 break 33005 } 33006 if r0.AuxInt != 8 { 33007 break 33008 } 33009 x0 := r0.Args[0] 33010 if x0.Op != OpAMD64MOVWloadidx1 { 33011 break 33012 } 33013 i0 := x0.AuxInt 33014 s := x0.Aux 33015 _ = x0.Args[2] 33016 idx := x0.Args[0] 33017 p := x0.Args[1] 33018 mem := x0.Args[2] 33019 or := v.Args[1] 33020 if or.Op != OpAMD64ORQ { 33021 break 33022 } 33023 _ = or.Args[1] 33024 s1 := or.Args[0] 33025 if s1.Op != OpAMD64SHLQconst { 33026 break 33027 } 33028 j1 := s1.AuxInt 33029 r1 := s1.Args[0] 33030 if r1.Op != OpAMD64ROLWconst { 33031 break 33032 } 33033 if r1.AuxInt != 8 { 33034 break 33035 } 33036 x1 := r1.Args[0] 33037 if x1.Op != OpAMD64MOVWloadidx1 { 33038 break 33039 } 33040 i1 := x1.AuxInt 33041 if x1.Aux != s { 33042 break 33043 } 33044 _ = x1.Args[2] 33045 if idx != x1.Args[0] { 33046 break 33047 } 33048 if p != x1.Args[1] { 33049 break 33050 } 33051 if mem != x1.Args[2] { 33052 break 33053 } 33054 y := or.Args[1] 33055 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33056 break 33057 } 33058 b = mergePoint(b, x0, x1) 33059 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33060 v.reset(OpCopy) 33061 v.AddArg(v0) 33062 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33063 v1.AuxInt = j1 33064 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33065 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33066 v3.AuxInt = i0 33067 v3.Aux = s 33068 v3.AddArg(p) 33069 v3.AddArg(idx) 33070 v3.AddArg(mem) 33071 v2.AddArg(v3) 33072 v1.AddArg(v2) 33073 v0.AddArg(v1) 33074 v0.AddArg(y) 33075 return true 33076 } 33077 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 33078 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33079 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33080 for { 33081 _ = v.Args[1] 33082 s0 := v.Args[0] 33083 if s0.Op != OpAMD64SHLQconst { 33084 break 33085 } 33086 j0 := s0.AuxInt 33087 r0 := s0.Args[0] 33088 if r0.Op != OpAMD64ROLWconst { 33089 break 33090 } 33091 if r0.AuxInt != 8 { 33092 break 33093 } 33094 x0 := r0.Args[0] 33095 if x0.Op != OpAMD64MOVWloadidx1 { 33096 break 33097 } 33098 i0 := x0.AuxInt 33099 s := x0.Aux 33100 _ = x0.Args[2] 33101 p := x0.Args[0] 33102 idx := x0.Args[1] 33103 mem := x0.Args[2] 33104 or := v.Args[1] 33105 if or.Op != OpAMD64ORQ { 33106 break 33107 } 33108 _ = or.Args[1] 33109 y := or.Args[0] 33110 s1 := or.Args[1] 33111 if s1.Op != OpAMD64SHLQconst { 33112 break 33113 } 33114 j1 := s1.AuxInt 33115 r1 := s1.Args[0] 33116 if r1.Op != OpAMD64ROLWconst { 33117 break 33118 } 33119 if r1.AuxInt != 8 { 33120 break 33121 } 33122 x1 := r1.Args[0] 33123 if x1.Op != OpAMD64MOVWloadidx1 { 33124 break 33125 } 33126 i1 := x1.AuxInt 33127 if x1.Aux != s { 33128 
break 33129 } 33130 _ = x1.Args[2] 33131 if p != x1.Args[0] { 33132 break 33133 } 33134 if idx != x1.Args[1] { 33135 break 33136 } 33137 if mem != x1.Args[2] { 33138 break 33139 } 33140 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33141 break 33142 } 33143 b = mergePoint(b, x0, x1) 33144 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33145 v.reset(OpCopy) 33146 v.AddArg(v0) 33147 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33148 v1.AuxInt = j1 33149 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33150 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33151 v3.AuxInt = i0 33152 v3.Aux = s 33153 v3.AddArg(p) 33154 v3.AddArg(idx) 33155 v3.AddArg(mem) 33156 v2.AddArg(v3) 33157 v1.AddArg(v2) 33158 v0.AddArg(v1) 33159 v0.AddArg(y) 33160 return true 33161 } 33162 return false 33163 } 33164 func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { 33165 b := v.Block 33166 _ = b 33167 typ := &b.Func.Config.Types 33168 _ = typ 33169 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 33170 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33171 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33172 for { 33173 _ = v.Args[1] 33174 s0 := v.Args[0] 33175 if s0.Op != OpAMD64SHLQconst { 33176 break 33177 } 33178 j0 := s0.AuxInt 33179 r0 := s0.Args[0] 33180 if r0.Op != OpAMD64ROLWconst { 33181 break 33182 } 33183 if r0.AuxInt != 8 { 33184 break 33185 } 33186 x0 := r0.Args[0] 33187 if x0.Op != OpAMD64MOVWloadidx1 { 33188 break 33189 } 33190 i0 := x0.AuxInt 33191 s := x0.Aux 33192 _ = x0.Args[2] 33193 idx := x0.Args[0] 33194 p := x0.Args[1] 33195 mem := x0.Args[2] 33196 or := v.Args[1] 33197 if or.Op != OpAMD64ORQ { 33198 break 33199 } 33200 _ = or.Args[1] 33201 y := or.Args[0] 33202 s1 := or.Args[1] 33203 if s1.Op != OpAMD64SHLQconst { 33204 break 33205 } 33206 j1 := s1.AuxInt 33207 r1 := s1.Args[0] 33208 if r1.Op != OpAMD64ROLWconst { 33209 break 33210 } 33211 if r1.AuxInt != 8 { 33212 break 33213 } 33214 x1 := r1.Args[0] 33215 if x1.Op != OpAMD64MOVWloadidx1 { 33216 break 33217 } 33218 i1 := x1.AuxInt 33219 if x1.Aux != s { 33220 break 33221 } 33222 _ = x1.Args[2] 33223 if p != x1.Args[0] { 33224 break 33225 } 33226 if idx != x1.Args[1] { 33227 break 33228 } 33229 if mem != x1.Args[2] { 33230 break 33231 } 33232 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33233 break 33234 } 33235 b = mergePoint(b, x0, x1) 33236 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33237 v.reset(OpCopy) 33238 v.AddArg(v0) 33239 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33240 v1.AuxInt = j1 33241 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33242 v3 := 
b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33243 v3.AuxInt = i0 33244 v3.Aux = s 33245 v3.AddArg(p) 33246 v3.AddArg(idx) 33247 v3.AddArg(mem) 33248 v2.AddArg(v3) 33249 v1.AddArg(v2) 33250 v0.AddArg(v1) 33251 v0.AddArg(y) 33252 return true 33253 } 33254 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 33255 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33256 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33257 for { 33258 _ = v.Args[1] 33259 s0 := v.Args[0] 33260 if s0.Op != OpAMD64SHLQconst { 33261 break 33262 } 33263 j0 := s0.AuxInt 33264 r0 := s0.Args[0] 33265 if r0.Op != OpAMD64ROLWconst { 33266 break 33267 } 33268 if r0.AuxInt != 8 { 33269 break 33270 } 33271 x0 := r0.Args[0] 33272 if x0.Op != OpAMD64MOVWloadidx1 { 33273 break 33274 } 33275 i0 := x0.AuxInt 33276 s := x0.Aux 33277 _ = x0.Args[2] 33278 p := x0.Args[0] 33279 idx := x0.Args[1] 33280 mem := x0.Args[2] 33281 or := v.Args[1] 33282 if or.Op != OpAMD64ORQ { 33283 break 33284 } 33285 _ = or.Args[1] 33286 y := or.Args[0] 33287 s1 := or.Args[1] 33288 if s1.Op != OpAMD64SHLQconst { 33289 break 33290 } 33291 j1 := s1.AuxInt 33292 r1 := s1.Args[0] 33293 if r1.Op != OpAMD64ROLWconst { 33294 break 33295 } 33296 if r1.AuxInt != 8 { 33297 break 33298 } 33299 x1 := r1.Args[0] 33300 if x1.Op != OpAMD64MOVWloadidx1 { 33301 break 33302 } 33303 i1 := x1.AuxInt 33304 if x1.Aux != s { 33305 break 33306 } 33307 _ = x1.Args[2] 33308 if idx != x1.Args[0] { 33309 break 33310 } 33311 if p != x1.Args[1] { 33312 break 33313 } 33314 if mem != x1.Args[2] { 33315 break 33316 } 33317 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33318 break 33319 } 33320 b = mergePoint(b, x0, x1) 33321 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33322 v.reset(OpCopy) 33323 v.AddArg(v0) 33324 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33325 v1.AuxInt = j1 33326 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33327 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33328 v3.AuxInt = i0 33329 v3.Aux = s 33330 v3.AddArg(p) 33331 v3.AddArg(idx) 33332 v3.AddArg(mem) 33333 v2.AddArg(v3) 33334 v1.AddArg(v2) 33335 v0.AddArg(v1) 33336 v0.AddArg(y) 33337 return true 33338 } 33339 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 33340 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33341 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33342 for { 33343 _ = v.Args[1] 33344 s0 := v.Args[0] 33345 if 
s0.Op != OpAMD64SHLQconst { 33346 break 33347 } 33348 j0 := s0.AuxInt 33349 r0 := s0.Args[0] 33350 if r0.Op != OpAMD64ROLWconst { 33351 break 33352 } 33353 if r0.AuxInt != 8 { 33354 break 33355 } 33356 x0 := r0.Args[0] 33357 if x0.Op != OpAMD64MOVWloadidx1 { 33358 break 33359 } 33360 i0 := x0.AuxInt 33361 s := x0.Aux 33362 _ = x0.Args[2] 33363 idx := x0.Args[0] 33364 p := x0.Args[1] 33365 mem := x0.Args[2] 33366 or := v.Args[1] 33367 if or.Op != OpAMD64ORQ { 33368 break 33369 } 33370 _ = or.Args[1] 33371 y := or.Args[0] 33372 s1 := or.Args[1] 33373 if s1.Op != OpAMD64SHLQconst { 33374 break 33375 } 33376 j1 := s1.AuxInt 33377 r1 := s1.Args[0] 33378 if r1.Op != OpAMD64ROLWconst { 33379 break 33380 } 33381 if r1.AuxInt != 8 { 33382 break 33383 } 33384 x1 := r1.Args[0] 33385 if x1.Op != OpAMD64MOVWloadidx1 { 33386 break 33387 } 33388 i1 := x1.AuxInt 33389 if x1.Aux != s { 33390 break 33391 } 33392 _ = x1.Args[2] 33393 if idx != x1.Args[0] { 33394 break 33395 } 33396 if p != x1.Args[1] { 33397 break 33398 } 33399 if mem != x1.Args[2] { 33400 break 33401 } 33402 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33403 break 33404 } 33405 b = mergePoint(b, x0, x1) 33406 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33407 v.reset(OpCopy) 33408 v.AddArg(v0) 33409 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33410 v1.AuxInt = j1 33411 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33412 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33413 v3.AuxInt = i0 33414 v3.Aux = s 33415 v3.AddArg(p) 33416 v3.AddArg(idx) 33417 v3.AddArg(mem) 33418 v2.AddArg(v3) 33419 v1.AddArg(v2) 33420 v0.AddArg(v1) 33421 v0.AddArg(y) 33422 return true 33423 } 33424 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33425 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33426 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33427 for { 33428 _ = v.Args[1] 33429 or := v.Args[0] 33430 if or.Op != OpAMD64ORQ { 33431 break 33432 } 33433 _ = or.Args[1] 33434 s1 := or.Args[0] 33435 if s1.Op != OpAMD64SHLQconst { 33436 break 33437 } 33438 j1 := s1.AuxInt 33439 r1 := s1.Args[0] 33440 if r1.Op != OpAMD64ROLWconst { 33441 break 33442 } 33443 if r1.AuxInt != 8 { 33444 break 33445 } 33446 x1 := r1.Args[0] 33447 if x1.Op != OpAMD64MOVWloadidx1 { 33448 break 33449 } 33450 i1 := x1.AuxInt 33451 s := x1.Aux 33452 _ = x1.Args[2] 33453 p := x1.Args[0] 33454 idx := x1.Args[1] 33455 mem := x1.Args[2] 33456 y := or.Args[1] 33457 s0 := v.Args[1] 33458 if s0.Op != OpAMD64SHLQconst { 33459 break 33460 } 33461 j0 := s0.AuxInt 33462 r0 := s0.Args[0] 33463 if r0.Op != OpAMD64ROLWconst { 33464 break 33465 } 33466 if r0.AuxInt != 8 { 33467 break 33468 } 33469 x0 := r0.Args[0] 33470 if x0.Op != OpAMD64MOVWloadidx1 { 33471 break 33472 } 33473 i0 := x0.AuxInt 33474 if x0.Aux != s { 33475 break 33476 } 33477 _ = x0.Args[2] 33478 if p != 
x0.Args[0] { 33479 break 33480 } 33481 if idx != x0.Args[1] { 33482 break 33483 } 33484 if mem != x0.Args[2] { 33485 break 33486 } 33487 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33488 break 33489 } 33490 b = mergePoint(b, x0, x1) 33491 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33492 v.reset(OpCopy) 33493 v.AddArg(v0) 33494 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33495 v1.AuxInt = j1 33496 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33497 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33498 v3.AuxInt = i0 33499 v3.Aux = s 33500 v3.AddArg(p) 33501 v3.AddArg(idx) 33502 v3.AddArg(mem) 33503 v2.AddArg(v3) 33504 v1.AddArg(v2) 33505 v0.AddArg(v1) 33506 v0.AddArg(y) 33507 return true 33508 } 33509 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33510 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33511 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33512 for { 33513 _ = v.Args[1] 33514 or := v.Args[0] 33515 if or.Op != OpAMD64ORQ { 33516 break 33517 } 33518 _ = or.Args[1] 33519 s1 := or.Args[0] 33520 if s1.Op != OpAMD64SHLQconst { 33521 break 33522 } 33523 j1 := s1.AuxInt 33524 r1 := s1.Args[0] 33525 if r1.Op != OpAMD64ROLWconst { 33526 break 33527 } 33528 if r1.AuxInt != 8 { 33529 break 33530 } 33531 x1 := r1.Args[0] 33532 if x1.Op != OpAMD64MOVWloadidx1 { 33533 break 33534 } 33535 i1 := x1.AuxInt 33536 s := x1.Aux 33537 _ = x1.Args[2] 33538 idx := x1.Args[0] 33539 p := x1.Args[1] 33540 mem := x1.Args[2] 33541 y := or.Args[1] 33542 s0 := v.Args[1] 33543 if s0.Op != OpAMD64SHLQconst { 33544 break 33545 } 33546 j0 := s0.AuxInt 33547 r0 := s0.Args[0] 33548 if r0.Op != OpAMD64ROLWconst { 33549 break 33550 } 33551 if r0.AuxInt != 8 { 33552 break 33553 } 33554 x0 := r0.Args[0] 33555 if x0.Op != OpAMD64MOVWloadidx1 { 33556 break 33557 } 33558 i0 := x0.AuxInt 33559 if x0.Aux != s { 33560 break 33561 } 33562 _ = x0.Args[2] 33563 if p != x0.Args[0] { 33564 break 33565 } 33566 if idx != x0.Args[1] { 33567 break 33568 } 33569 if mem != x0.Args[2] { 33570 break 33571 } 33572 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33573 break 33574 } 33575 b = mergePoint(b, x0, x1) 33576 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33577 v.reset(OpCopy) 33578 v.AddArg(v0) 33579 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33580 v1.AuxInt = j1 33581 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33582 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33583 v3.AuxInt = i0 33584 v3.Aux = s 33585 v3.AddArg(p) 33586 v3.AddArg(idx) 33587 v3.AddArg(mem) 33588 v2.AddArg(v3) 33589 v1.AddArg(v2) 33590 v0.AddArg(v1) 33591 
v0.AddArg(y) 33592 return true 33593 } 33594 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33595 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33596 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33597 for { 33598 _ = v.Args[1] 33599 or := v.Args[0] 33600 if or.Op != OpAMD64ORQ { 33601 break 33602 } 33603 _ = or.Args[1] 33604 y := or.Args[0] 33605 s1 := or.Args[1] 33606 if s1.Op != OpAMD64SHLQconst { 33607 break 33608 } 33609 j1 := s1.AuxInt 33610 r1 := s1.Args[0] 33611 if r1.Op != OpAMD64ROLWconst { 33612 break 33613 } 33614 if r1.AuxInt != 8 { 33615 break 33616 } 33617 x1 := r1.Args[0] 33618 if x1.Op != OpAMD64MOVWloadidx1 { 33619 break 33620 } 33621 i1 := x1.AuxInt 33622 s := x1.Aux 33623 _ = x1.Args[2] 33624 p := x1.Args[0] 33625 idx := x1.Args[1] 33626 mem := x1.Args[2] 33627 s0 := v.Args[1] 33628 if s0.Op != OpAMD64SHLQconst { 33629 break 33630 } 33631 j0 := s0.AuxInt 33632 r0 := s0.Args[0] 33633 if r0.Op != OpAMD64ROLWconst { 33634 break 33635 } 33636 if r0.AuxInt != 8 { 33637 break 33638 } 33639 x0 := r0.Args[0] 33640 if x0.Op != OpAMD64MOVWloadidx1 { 33641 break 33642 } 33643 i0 := x0.AuxInt 33644 if x0.Aux != s { 33645 break 33646 } 33647 _ = x0.Args[2] 33648 if p != x0.Args[0] { 33649 break 33650 } 33651 if idx != x0.Args[1] { 33652 break 33653 } 33654 if mem != x0.Args[2] { 33655 break 33656 } 33657 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33658 break 33659 } 33660 b = mergePoint(b, x0, x1) 33661 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33662 v.reset(OpCopy) 33663 v.AddArg(v0) 33664 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33665 v1.AuxInt = j1 33666 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33667 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33668 v3.AuxInt = i0 33669 v3.Aux = s 33670 v3.AddArg(p) 33671 v3.AddArg(idx) 33672 v3.AddArg(mem) 33673 v2.AddArg(v3) 33674 v1.AddArg(v2) 33675 v0.AddArg(v1) 33676 v0.AddArg(y) 33677 return true 33678 } 33679 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33680 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33681 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33682 for { 33683 _ = v.Args[1] 33684 or := v.Args[0] 33685 if or.Op != OpAMD64ORQ { 33686 break 33687 } 33688 _ = or.Args[1] 33689 y := or.Args[0] 33690 s1 := or.Args[1] 33691 if s1.Op != OpAMD64SHLQconst { 33692 break 33693 } 33694 j1 := s1.AuxInt 33695 r1 := s1.Args[0] 33696 if 
r1.Op != OpAMD64ROLWconst { 33697 break 33698 } 33699 if r1.AuxInt != 8 { 33700 break 33701 } 33702 x1 := r1.Args[0] 33703 if x1.Op != OpAMD64MOVWloadidx1 { 33704 break 33705 } 33706 i1 := x1.AuxInt 33707 s := x1.Aux 33708 _ = x1.Args[2] 33709 idx := x1.Args[0] 33710 p := x1.Args[1] 33711 mem := x1.Args[2] 33712 s0 := v.Args[1] 33713 if s0.Op != OpAMD64SHLQconst { 33714 break 33715 } 33716 j0 := s0.AuxInt 33717 r0 := s0.Args[0] 33718 if r0.Op != OpAMD64ROLWconst { 33719 break 33720 } 33721 if r0.AuxInt != 8 { 33722 break 33723 } 33724 x0 := r0.Args[0] 33725 if x0.Op != OpAMD64MOVWloadidx1 { 33726 break 33727 } 33728 i0 := x0.AuxInt 33729 if x0.Aux != s { 33730 break 33731 } 33732 _ = x0.Args[2] 33733 if p != x0.Args[0] { 33734 break 33735 } 33736 if idx != x0.Args[1] { 33737 break 33738 } 33739 if mem != x0.Args[2] { 33740 break 33741 } 33742 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33743 break 33744 } 33745 b = mergePoint(b, x0, x1) 33746 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33747 v.reset(OpCopy) 33748 v.AddArg(v0) 33749 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33750 v1.AuxInt = j1 33751 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33752 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33753 v3.AuxInt = i0 33754 v3.Aux = s 33755 v3.AddArg(p) 33756 v3.AddArg(idx) 33757 v3.AddArg(mem) 33758 v2.AddArg(v3) 33759 v1.AddArg(v2) 33760 v0.AddArg(v1) 33761 v0.AddArg(y) 33762 return true 33763 } 33764 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33765 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33766 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33767 for { 33768 _ = v.Args[1] 33769 or := v.Args[0] 33770 if or.Op != OpAMD64ORQ { 33771 break 33772 } 33773 _ = or.Args[1] 33774 s1 := or.Args[0] 33775 if s1.Op != OpAMD64SHLQconst { 33776 break 33777 } 33778 j1 := s1.AuxInt 33779 r1 := s1.Args[0] 33780 if r1.Op != OpAMD64ROLWconst { 33781 break 33782 } 33783 if r1.AuxInt != 8 { 33784 break 33785 } 33786 x1 := r1.Args[0] 33787 if x1.Op != OpAMD64MOVWloadidx1 { 33788 break 33789 } 33790 i1 := x1.AuxInt 33791 s := x1.Aux 33792 _ = x1.Args[2] 33793 p := x1.Args[0] 33794 idx := x1.Args[1] 33795 mem := x1.Args[2] 33796 y := or.Args[1] 33797 s0 := v.Args[1] 33798 if s0.Op != OpAMD64SHLQconst { 33799 break 33800 } 33801 j0 := s0.AuxInt 33802 r0 := s0.Args[0] 33803 if r0.Op != OpAMD64ROLWconst { 33804 break 33805 } 33806 if r0.AuxInt != 8 { 33807 break 33808 } 33809 x0 := r0.Args[0] 33810 if x0.Op != OpAMD64MOVWloadidx1 { 33811 break 33812 } 33813 i0 := x0.AuxInt 33814 if x0.Aux != s { 33815 break 33816 } 33817 _ = x0.Args[2] 33818 if idx != x0.Args[0] { 33819 break 33820 } 33821 if p != x0.Args[1] { 33822 break 33823 } 33824 if mem != x0.Args[2] { 33825 break 33826 } 33827 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 
1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33828 break 33829 } 33830 b = mergePoint(b, x0, x1) 33831 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33832 v.reset(OpCopy) 33833 v.AddArg(v0) 33834 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33835 v1.AuxInt = j1 33836 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33837 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33838 v3.AuxInt = i0 33839 v3.Aux = s 33840 v3.AddArg(p) 33841 v3.AddArg(idx) 33842 v3.AddArg(mem) 33843 v2.AddArg(v3) 33844 v1.AddArg(v2) 33845 v0.AddArg(v1) 33846 v0.AddArg(y) 33847 return true 33848 } 33849 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33850 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33851 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33852 for { 33853 _ = v.Args[1] 33854 or := v.Args[0] 33855 if or.Op != OpAMD64ORQ { 33856 break 33857 } 33858 _ = or.Args[1] 33859 s1 := or.Args[0] 33860 if s1.Op != OpAMD64SHLQconst { 33861 break 33862 } 33863 j1 := s1.AuxInt 33864 r1 := s1.Args[0] 33865 if r1.Op != OpAMD64ROLWconst { 33866 break 33867 } 33868 if r1.AuxInt != 8 { 33869 break 33870 } 33871 x1 := r1.Args[0] 33872 if x1.Op != OpAMD64MOVWloadidx1 { 33873 break 33874 } 33875 i1 := x1.AuxInt 33876 s := x1.Aux 33877 _ = x1.Args[2] 33878 idx := x1.Args[0] 33879 p := x1.Args[1] 33880 mem := x1.Args[2] 33881 y := or.Args[1] 33882 s0 := v.Args[1] 33883 if s0.Op != OpAMD64SHLQconst { 33884 break 33885 } 33886 j0 := s0.AuxInt 33887 r0 := s0.Args[0] 33888 if r0.Op != OpAMD64ROLWconst { 33889 break 33890 } 33891 if r0.AuxInt != 8 { 33892 break 33893 } 33894 x0 := r0.Args[0] 33895 if x0.Op != OpAMD64MOVWloadidx1 { 33896 break 33897 } 33898 i0 := x0.AuxInt 33899 if x0.Aux != s { 33900 break 33901 } 33902 _ = x0.Args[2] 33903 if idx != x0.Args[0] { 33904 break 33905 } 33906 if p != x0.Args[1] { 33907 break 33908 } 33909 if mem != x0.Args[2] { 33910 break 33911 } 33912 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33913 break 33914 } 33915 b = mergePoint(b, x0, x1) 33916 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33917 v.reset(OpCopy) 33918 v.AddArg(v0) 33919 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33920 v1.AuxInt = j1 33921 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33922 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33923 v3.AuxInt = i0 33924 v3.Aux = s 33925 v3.AddArg(p) 33926 v3.AddArg(idx) 33927 v3.AddArg(mem) 33928 v2.AddArg(v3) 33929 v1.AddArg(v2) 33930 v0.AddArg(v1) 33931 v0.AddArg(y) 33932 return true 33933 } 33934 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33935 
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33936 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33937 for { 33938 _ = v.Args[1] 33939 or := v.Args[0] 33940 if or.Op != OpAMD64ORQ { 33941 break 33942 } 33943 _ = or.Args[1] 33944 y := or.Args[0] 33945 s1 := or.Args[1] 33946 if s1.Op != OpAMD64SHLQconst { 33947 break 33948 } 33949 j1 := s1.AuxInt 33950 r1 := s1.Args[0] 33951 if r1.Op != OpAMD64ROLWconst { 33952 break 33953 } 33954 if r1.AuxInt != 8 { 33955 break 33956 } 33957 x1 := r1.Args[0] 33958 if x1.Op != OpAMD64MOVWloadidx1 { 33959 break 33960 } 33961 i1 := x1.AuxInt 33962 s := x1.Aux 33963 _ = x1.Args[2] 33964 p := x1.Args[0] 33965 idx := x1.Args[1] 33966 mem := x1.Args[2] 33967 s0 := v.Args[1] 33968 if s0.Op != OpAMD64SHLQconst { 33969 break 33970 } 33971 j0 := s0.AuxInt 33972 r0 := s0.Args[0] 33973 if r0.Op != OpAMD64ROLWconst { 33974 break 33975 } 33976 if r0.AuxInt != 8 { 33977 break 33978 } 33979 x0 := r0.Args[0] 33980 if x0.Op != OpAMD64MOVWloadidx1 { 33981 break 33982 } 33983 i0 := x0.AuxInt 33984 if x0.Aux != s { 33985 break 33986 } 33987 _ = x0.Args[2] 33988 if idx != x0.Args[0] { 33989 break 33990 } 33991 if p != x0.Args[1] { 33992 break 33993 } 33994 if mem != x0.Args[2] { 33995 break 33996 } 33997 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33998 break 33999 } 34000 b = mergePoint(b, x0, x1) 34001 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34002 v.reset(OpCopy) 34003 v.AddArg(v0) 34004 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34005 v1.AuxInt = j1 34006 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 34007 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34008 v3.AuxInt = i0 34009 v3.Aux = s 34010 v3.AddArg(p) 34011 v3.AddArg(idx) 34012 v3.AddArg(mem) 34013 v2.AddArg(v3) 34014 v1.AddArg(v2) 34015 v0.AddArg(v1) 34016 v0.AddArg(y) 34017 return true 34018 } 34019 return false 34020 } 34021 func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { 34022 b := v.Block 34023 _ = b 34024 typ := &b.Func.Config.Types 34025 _ = typ 34026 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 34027 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 34028 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 34029 for { 34030 _ = v.Args[1] 34031 or := v.Args[0] 34032 if or.Op != OpAMD64ORQ { 34033 break 34034 } 34035 _ = or.Args[1] 34036 y := or.Args[0] 34037 s1 := or.Args[1] 34038 if s1.Op != OpAMD64SHLQconst { 34039 break 34040 } 34041 j1 := s1.AuxInt 34042 r1 := s1.Args[0] 34043 if r1.Op != OpAMD64ROLWconst { 34044 break 34045 } 34046 if 
r1.AuxInt != 8 { 34047 break 34048 } 34049 x1 := r1.Args[0] 34050 if x1.Op != OpAMD64MOVWloadidx1 { 34051 break 34052 } 34053 i1 := x1.AuxInt 34054 s := x1.Aux 34055 _ = x1.Args[2] 34056 idx := x1.Args[0] 34057 p := x1.Args[1] 34058 mem := x1.Args[2] 34059 s0 := v.Args[1] 34060 if s0.Op != OpAMD64SHLQconst { 34061 break 34062 } 34063 j0 := s0.AuxInt 34064 r0 := s0.Args[0] 34065 if r0.Op != OpAMD64ROLWconst { 34066 break 34067 } 34068 if r0.AuxInt != 8 { 34069 break 34070 } 34071 x0 := r0.Args[0] 34072 if x0.Op != OpAMD64MOVWloadidx1 { 34073 break 34074 } 34075 i0 := x0.AuxInt 34076 if x0.Aux != s { 34077 break 34078 } 34079 _ = x0.Args[2] 34080 if idx != x0.Args[0] { 34081 break 34082 } 34083 if p != x0.Args[1] { 34084 break 34085 } 34086 if mem != x0.Args[2] { 34087 break 34088 } 34089 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 34090 break 34091 } 34092 b = mergePoint(b, x0, x1) 34093 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34094 v.reset(OpCopy) 34095 v.AddArg(v0) 34096 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34097 v1.AuxInt = j1 34098 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 34099 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34100 v3.AuxInt = i0 34101 v3.Aux = s 34102 v3.AddArg(p) 34103 v3.AddArg(idx) 34104 v3.AddArg(mem) 34105 v2.AddArg(v3) 34106 v1.AddArg(v2) 34107 v0.AddArg(v1) 34108 v0.AddArg(y) 34109 return true 34110 } 34111 // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) 34112 // cond: canMergeLoad(v, l, x) && clobber(l) 34113 // result: (ORQmem x [off] {sym} ptr mem) 34114 for { 34115 _ = v.Args[1] 34116 x := v.Args[0] 34117 l := v.Args[1] 34118 if l.Op != OpAMD64MOVQload { 34119 break 34120 } 34121 off := l.AuxInt 34122 sym := l.Aux 34123 _ = l.Args[1] 34124 ptr := l.Args[0] 34125 mem := l.Args[1] 34126 if !(canMergeLoad(v, l, x) && clobber(l)) { 34127 break 34128 } 34129 v.reset(OpAMD64ORQmem) 34130 v.AuxInt = off 34131 v.Aux = sym 34132 v.AddArg(x) 34133 v.AddArg(ptr) 34134 v.AddArg(mem) 34135 return true 34136 } 34137 // match: (ORQ l:(MOVQload [off] {sym} ptr mem) x) 34138 // cond: canMergeLoad(v, l, x) && clobber(l) 34139 // result: (ORQmem x [off] {sym} ptr mem) 34140 for { 34141 _ = v.Args[1] 34142 l := v.Args[0] 34143 if l.Op != OpAMD64MOVQload { 34144 break 34145 } 34146 off := l.AuxInt 34147 sym := l.Aux 34148 _ = l.Args[1] 34149 ptr := l.Args[0] 34150 mem := l.Args[1] 34151 x := v.Args[1] 34152 if !(canMergeLoad(v, l, x) && clobber(l)) { 34153 break 34154 } 34155 v.reset(OpAMD64ORQmem) 34156 v.AuxInt = off 34157 v.Aux = sym 34158 v.AddArg(x) 34159 v.AddArg(ptr) 34160 v.AddArg(mem) 34161 return true 34162 } 34163 return false 34164 } 34165 func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool { 34166 // match: (ORQconst [0] x) 34167 // cond: 34168 // result: x 34169 for { 34170 if v.AuxInt != 0 { 34171 break 34172 } 34173 x := v.Args[0] 34174 v.reset(OpCopy) 34175 v.Type = x.Type 34176 v.AddArg(x) 34177 return true 34178 } 34179 // match: (ORQconst [-1] _) 34180 // cond: 34181 // result: (MOVQconst [-1]) 34182 for { 34183 if v.AuxInt != -1 { 34184 break 34185 } 34186 v.reset(OpAMD64MOVQconst) 34187 v.AuxInt = -1 34188 return true 34189 } 34190 // match: (ORQconst [c] (MOVQconst [d])) 34191 // cond: 34192 // result: (MOVQconst [c|d]) 34193 for { 34194 c := v.AuxInt 34195 v_0 
:= v.Args[0] 34196 if v_0.Op != OpAMD64MOVQconst { 34197 break 34198 } 34199 d := v_0.AuxInt 34200 v.reset(OpAMD64MOVQconst) 34201 v.AuxInt = c | d 34202 return true 34203 } 34204 return false 34205 } 34206 func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool { 34207 b := v.Block 34208 _ = b 34209 typ := &b.Func.Config.Types 34210 _ = typ 34211 // match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 34212 // cond: 34213 // result: ( ORQ x (MOVQf2i y)) 34214 for { 34215 off := v.AuxInt 34216 sym := v.Aux 34217 _ = v.Args[2] 34218 x := v.Args[0] 34219 ptr := v.Args[1] 34220 v_2 := v.Args[2] 34221 if v_2.Op != OpAMD64MOVSDstore { 34222 break 34223 } 34224 if v_2.AuxInt != off { 34225 break 34226 } 34227 if v_2.Aux != sym { 34228 break 34229 } 34230 _ = v_2.Args[2] 34231 if ptr != v_2.Args[0] { 34232 break 34233 } 34234 y := v_2.Args[1] 34235 v.reset(OpAMD64ORQ) 34236 v.AddArg(x) 34237 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 34238 v0.AddArg(y) 34239 v.AddArg(v0) 34240 return true 34241 } 34242 return false 34243 } 34244 func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool { 34245 // match: (ROLB x (NEGQ y)) 34246 // cond: 34247 // result: (RORB x y) 34248 for { 34249 _ = v.Args[1] 34250 x := v.Args[0] 34251 v_1 := v.Args[1] 34252 if v_1.Op != OpAMD64NEGQ { 34253 break 34254 } 34255 y := v_1.Args[0] 34256 v.reset(OpAMD64RORB) 34257 v.AddArg(x) 34258 v.AddArg(y) 34259 return true 34260 } 34261 // match: (ROLB x (NEGL y)) 34262 // cond: 34263 // result: (RORB x y) 34264 for { 34265 _ = v.Args[1] 34266 x := v.Args[0] 34267 v_1 := v.Args[1] 34268 if v_1.Op != OpAMD64NEGL { 34269 break 34270 } 34271 y := v_1.Args[0] 34272 v.reset(OpAMD64RORB) 34273 v.AddArg(x) 34274 v.AddArg(y) 34275 return true 34276 } 34277 // match: (ROLB x (MOVQconst [c])) 34278 // cond: 34279 // result: (ROLBconst [c&7 ] x) 34280 for { 34281 _ = v.Args[1] 34282 x := v.Args[0] 34283 v_1 := v.Args[1] 34284 if v_1.Op != OpAMD64MOVQconst { 34285 break 34286 } 34287 c := v_1.AuxInt 34288 v.reset(OpAMD64ROLBconst) 34289 v.AuxInt = c & 7 34290 v.AddArg(x) 34291 return true 34292 } 34293 // match: (ROLB x (MOVLconst [c])) 34294 // cond: 34295 // result: (ROLBconst [c&7 ] x) 34296 for { 34297 _ = v.Args[1] 34298 x := v.Args[0] 34299 v_1 := v.Args[1] 34300 if v_1.Op != OpAMD64MOVLconst { 34301 break 34302 } 34303 c := v_1.AuxInt 34304 v.reset(OpAMD64ROLBconst) 34305 v.AuxInt = c & 7 34306 v.AddArg(x) 34307 return true 34308 } 34309 return false 34310 } 34311 func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool { 34312 // match: (ROLBconst [c] (ROLBconst [d] x)) 34313 // cond: 34314 // result: (ROLBconst [(c+d)& 7] x) 34315 for { 34316 c := v.AuxInt 34317 v_0 := v.Args[0] 34318 if v_0.Op != OpAMD64ROLBconst { 34319 break 34320 } 34321 d := v_0.AuxInt 34322 x := v_0.Args[0] 34323 v.reset(OpAMD64ROLBconst) 34324 v.AuxInt = (c + d) & 7 34325 v.AddArg(x) 34326 return true 34327 } 34328 // match: (ROLBconst x [0]) 34329 // cond: 34330 // result: x 34331 for { 34332 if v.AuxInt != 0 { 34333 break 34334 } 34335 x := v.Args[0] 34336 v.reset(OpCopy) 34337 v.Type = x.Type 34338 v.AddArg(x) 34339 return true 34340 } 34341 return false 34342 } 34343 func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool { 34344 // match: (ROLL x (NEGQ y)) 34345 // cond: 34346 // result: (RORL x y) 34347 for { 34348 _ = v.Args[1] 34349 x := v.Args[0] 34350 v_1 := v.Args[1] 34351 if v_1.Op != OpAMD64NEGQ { 34352 break 34353 } 34354 y := v_1.Args[0] 34355 v.reset(OpAMD64RORL) 34356 v.AddArg(x) 34357 v.AddArg(y) 34358 return true 34359 } 34360 
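	// Rotating by a negated count folds into the opposite rotation because x86
	// rotate instructions take the count modulo the operand width (32 for
	// ROLL/RORL). The same identity is visible with math/bits, where a negative
	// count rotates the other way (an illustrative check, assuming Go >= 1.9;
	// not part of this generated file):
	//
	//	// bits.RotateLeft32(x, -int(y)) == bits.RotateLeft32(x, 32-int(y)) for 0 < y < 32,
	//	// so (ROLL x (NEGQ y)) and (ROLL x (NEGL y)) compute (RORL x y).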
// match: (ROLL x (NEGL y)) 34361 // cond: 34362 // result: (RORL x y) 34363 for { 34364 _ = v.Args[1] 34365 x := v.Args[0] 34366 v_1 := v.Args[1] 34367 if v_1.Op != OpAMD64NEGL { 34368 break 34369 } 34370 y := v_1.Args[0] 34371 v.reset(OpAMD64RORL) 34372 v.AddArg(x) 34373 v.AddArg(y) 34374 return true 34375 } 34376 // match: (ROLL x (MOVQconst [c])) 34377 // cond: 34378 // result: (ROLLconst [c&31] x) 34379 for { 34380 _ = v.Args[1] 34381 x := v.Args[0] 34382 v_1 := v.Args[1] 34383 if v_1.Op != OpAMD64MOVQconst { 34384 break 34385 } 34386 c := v_1.AuxInt 34387 v.reset(OpAMD64ROLLconst) 34388 v.AuxInt = c & 31 34389 v.AddArg(x) 34390 return true 34391 } 34392 // match: (ROLL x (MOVLconst [c])) 34393 // cond: 34394 // result: (ROLLconst [c&31] x) 34395 for { 34396 _ = v.Args[1] 34397 x := v.Args[0] 34398 v_1 := v.Args[1] 34399 if v_1.Op != OpAMD64MOVLconst { 34400 break 34401 } 34402 c := v_1.AuxInt 34403 v.reset(OpAMD64ROLLconst) 34404 v.AuxInt = c & 31 34405 v.AddArg(x) 34406 return true 34407 } 34408 return false 34409 } 34410 func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool { 34411 // match: (ROLLconst [c] (ROLLconst [d] x)) 34412 // cond: 34413 // result: (ROLLconst [(c+d)&31] x) 34414 for { 34415 c := v.AuxInt 34416 v_0 := v.Args[0] 34417 if v_0.Op != OpAMD64ROLLconst { 34418 break 34419 } 34420 d := v_0.AuxInt 34421 x := v_0.Args[0] 34422 v.reset(OpAMD64ROLLconst) 34423 v.AuxInt = (c + d) & 31 34424 v.AddArg(x) 34425 return true 34426 } 34427 // match: (ROLLconst x [0]) 34428 // cond: 34429 // result: x 34430 for { 34431 if v.AuxInt != 0 { 34432 break 34433 } 34434 x := v.Args[0] 34435 v.reset(OpCopy) 34436 v.Type = x.Type 34437 v.AddArg(x) 34438 return true 34439 } 34440 return false 34441 } 34442 func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool { 34443 // match: (ROLQ x (NEGQ y)) 34444 // cond: 34445 // result: (RORQ x y) 34446 for { 34447 _ = v.Args[1] 34448 x := v.Args[0] 34449 v_1 := v.Args[1] 34450 if v_1.Op != OpAMD64NEGQ { 34451 break 34452 } 34453 y := v_1.Args[0] 34454 v.reset(OpAMD64RORQ) 34455 v.AddArg(x) 34456 v.AddArg(y) 34457 return true 34458 } 34459 // match: (ROLQ x (NEGL y)) 34460 // cond: 34461 // result: (RORQ x y) 34462 for { 34463 _ = v.Args[1] 34464 x := v.Args[0] 34465 v_1 := v.Args[1] 34466 if v_1.Op != OpAMD64NEGL { 34467 break 34468 } 34469 y := v_1.Args[0] 34470 v.reset(OpAMD64RORQ) 34471 v.AddArg(x) 34472 v.AddArg(y) 34473 return true 34474 } 34475 // match: (ROLQ x (MOVQconst [c])) 34476 // cond: 34477 // result: (ROLQconst [c&63] x) 34478 for { 34479 _ = v.Args[1] 34480 x := v.Args[0] 34481 v_1 := v.Args[1] 34482 if v_1.Op != OpAMD64MOVQconst { 34483 break 34484 } 34485 c := v_1.AuxInt 34486 v.reset(OpAMD64ROLQconst) 34487 v.AuxInt = c & 63 34488 v.AddArg(x) 34489 return true 34490 } 34491 // match: (ROLQ x (MOVLconst [c])) 34492 // cond: 34493 // result: (ROLQconst [c&63] x) 34494 for { 34495 _ = v.Args[1] 34496 x := v.Args[0] 34497 v_1 := v.Args[1] 34498 if v_1.Op != OpAMD64MOVLconst { 34499 break 34500 } 34501 c := v_1.AuxInt 34502 v.reset(OpAMD64ROLQconst) 34503 v.AuxInt = c & 63 34504 v.AddArg(x) 34505 return true 34506 } 34507 return false 34508 } 34509 func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool { 34510 // match: (ROLQconst [c] (ROLQconst [d] x)) 34511 // cond: 34512 // result: (ROLQconst [(c+d)&63] x) 34513 for { 34514 c := v.AuxInt 34515 v_0 := v.Args[0] 34516 if v_0.Op != OpAMD64ROLQconst { 34517 break 34518 } 34519 d := v_0.AuxInt 34520 x := v_0.Args[0] 34521 v.reset(OpAMD64ROLQconst) 34522 v.AuxInt = (c + d) & 63 
34523 v.AddArg(x) 34524 return true 34525 } 34526 // match: (ROLQconst x [0]) 34527 // cond: 34528 // result: x 34529 for { 34530 if v.AuxInt != 0 { 34531 break 34532 } 34533 x := v.Args[0] 34534 v.reset(OpCopy) 34535 v.Type = x.Type 34536 v.AddArg(x) 34537 return true 34538 } 34539 return false 34540 } 34541 func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool { 34542 // match: (ROLW x (NEGQ y)) 34543 // cond: 34544 // result: (RORW x y) 34545 for { 34546 _ = v.Args[1] 34547 x := v.Args[0] 34548 v_1 := v.Args[1] 34549 if v_1.Op != OpAMD64NEGQ { 34550 break 34551 } 34552 y := v_1.Args[0] 34553 v.reset(OpAMD64RORW) 34554 v.AddArg(x) 34555 v.AddArg(y) 34556 return true 34557 } 34558 // match: (ROLW x (NEGL y)) 34559 // cond: 34560 // result: (RORW x y) 34561 for { 34562 _ = v.Args[1] 34563 x := v.Args[0] 34564 v_1 := v.Args[1] 34565 if v_1.Op != OpAMD64NEGL { 34566 break 34567 } 34568 y := v_1.Args[0] 34569 v.reset(OpAMD64RORW) 34570 v.AddArg(x) 34571 v.AddArg(y) 34572 return true 34573 } 34574 // match: (ROLW x (MOVQconst [c])) 34575 // cond: 34576 // result: (ROLWconst [c&15] x) 34577 for { 34578 _ = v.Args[1] 34579 x := v.Args[0] 34580 v_1 := v.Args[1] 34581 if v_1.Op != OpAMD64MOVQconst { 34582 break 34583 } 34584 c := v_1.AuxInt 34585 v.reset(OpAMD64ROLWconst) 34586 v.AuxInt = c & 15 34587 v.AddArg(x) 34588 return true 34589 } 34590 // match: (ROLW x (MOVLconst [c])) 34591 // cond: 34592 // result: (ROLWconst [c&15] x) 34593 for { 34594 _ = v.Args[1] 34595 x := v.Args[0] 34596 v_1 := v.Args[1] 34597 if v_1.Op != OpAMD64MOVLconst { 34598 break 34599 } 34600 c := v_1.AuxInt 34601 v.reset(OpAMD64ROLWconst) 34602 v.AuxInt = c & 15 34603 v.AddArg(x) 34604 return true 34605 } 34606 return false 34607 } 34608 func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool { 34609 // match: (ROLWconst [c] (ROLWconst [d] x)) 34610 // cond: 34611 // result: (ROLWconst [(c+d)&15] x) 34612 for { 34613 c := v.AuxInt 34614 v_0 := v.Args[0] 34615 if v_0.Op != OpAMD64ROLWconst { 34616 break 34617 } 34618 d := v_0.AuxInt 34619 x := v_0.Args[0] 34620 v.reset(OpAMD64ROLWconst) 34621 v.AuxInt = (c + d) & 15 34622 v.AddArg(x) 34623 return true 34624 } 34625 // match: (ROLWconst x [0]) 34626 // cond: 34627 // result: x 34628 for { 34629 if v.AuxInt != 0 { 34630 break 34631 } 34632 x := v.Args[0] 34633 v.reset(OpCopy) 34634 v.Type = x.Type 34635 v.AddArg(x) 34636 return true 34637 } 34638 return false 34639 } 34640 func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool { 34641 // match: (RORB x (NEGQ y)) 34642 // cond: 34643 // result: (ROLB x y) 34644 for { 34645 _ = v.Args[1] 34646 x := v.Args[0] 34647 v_1 := v.Args[1] 34648 if v_1.Op != OpAMD64NEGQ { 34649 break 34650 } 34651 y := v_1.Args[0] 34652 v.reset(OpAMD64ROLB) 34653 v.AddArg(x) 34654 v.AddArg(y) 34655 return true 34656 } 34657 // match: (RORB x (NEGL y)) 34658 // cond: 34659 // result: (ROLB x y) 34660 for { 34661 _ = v.Args[1] 34662 x := v.Args[0] 34663 v_1 := v.Args[1] 34664 if v_1.Op != OpAMD64NEGL { 34665 break 34666 } 34667 y := v_1.Args[0] 34668 v.reset(OpAMD64ROLB) 34669 v.AddArg(x) 34670 v.AddArg(y) 34671 return true 34672 } 34673 // match: (RORB x (MOVQconst [c])) 34674 // cond: 34675 // result: (ROLBconst [(-c)&7 ] x) 34676 for { 34677 _ = v.Args[1] 34678 x := v.Args[0] 34679 v_1 := v.Args[1] 34680 if v_1.Op != OpAMD64MOVQconst { 34681 break 34682 } 34683 c := v_1.AuxInt 34684 v.reset(OpAMD64ROLBconst) 34685 v.AuxInt = (-c) & 7 34686 v.AddArg(x) 34687 return true 34688 } 34689 // match: (RORB x (MOVLconst [c])) 34690 // cond: 34691 // result: 
func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
	// match: (RORB x (NEGQ y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (NEGL y))
	// cond:
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// cond:
	// result: (ROLBconst [(-c)&7 ] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = (-c) & 7
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	return false
}
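// A right rotate by a constant is canonicalized to the equivalent left
// rotate: rotating right by c equals rotating left by (w-c) mod w, which is
// (-c)&(w-1) for a power-of-two width w. Illustrative check for the byte
// case (assumes math/bits; not generated from the rules):
//
//	// bits.RotateLeft8(x, -c) == bits.RotateLeft8(x, (-c)&7) for all c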
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
	// match: (SARBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
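// SARB (and SARW below) saturates the constant shift amount with min instead
// of masking it: an arithmetic right shift by anything >= width-1 already
// produces pure sign bits, so clamping c&31 down to 7 (or 15) preserves the
// result while keeping the encoded amount within the operand width. Sketch
// (illustrative only):
//
//	// for an int8 x and any c in [7,31]: x>>uint(c) == x>>7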
func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARL x (MOVQconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
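// The ADDconst/ANDconst rules above exploit the hardware masking of a 32-bit
// shift count to its low 5 bits: adding a multiple of 32 to the count, or
// AND-ing it with a mask whose low 5 bits are all set, cannot change
// count&31, and negation commutes with both modulo 32. Illustrative reading
// (not part of the generated rules):
//
//	// (y+c)&31 == y&31 when c&31 == 0
//	// (y&c)&31 == y&31 when c&31 == 31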
func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
	// match: (SARLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARQ x (MOVQconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
	// match: (SARQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
	// match: (SARW x (MOVQconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
	// match: (SARWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
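// SARxconst applied to a known constant folds away entirely: AuxInt is a
// signed int64, so Go's d >> uint64(c) is itself an arithmetic shift and
// reproduces SAR's sign extension exactly. A concrete illustrative case:
//
//	// d = -8 (…11111000), c = 2  =>  d>>uint64(c) == -2 (…11111110)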
func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
	// match: (SBBLcarrymask (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
	// match: (SBBQcarrymask (FlagEQ))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
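// SBBxcarrymask computes x - x - CF, i.e. 0 when the carry is clear and -1
// (all ones) when it is set. The Flag* operands record fully-determined
// comparison outcomes, and the carry flag encodes the unsigned half of the
// comparison, so the mask folds to -1 exactly for the _ULT outcomes (unsigned
// less-than sets the carry) and to 0 otherwise.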
func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
	// match: (SETA (InvertFlags x))
	// cond:
	// result: (SETB x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
	// match: (SETAE (InvertFlags x))
	// cond:
	// result: (SETBE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
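// InvertFlags stands for a comparison with its operands swapped, so each
// SETcc of an InvertFlags maps to the SETcc of the swapped condition:
// A<->B, AE<->BE, and further down G<->L and GE<->LE, while EQ and NE map to
// themselves. Roughly (illustrative only):
//
//	// SETA(CMP x y) == SETB(CMP y x), since x > y iff y < x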
func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool {
	// match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETBEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETBEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool {
	// match: (SETAmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETBmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
	// match: (SETB (InvertFlags x))
	// cond:
	// result: (SETA x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
	// match: (SETBE (InvertFlags x))
	// cond:
	// result: (SETAE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool {
	// match: (SETBEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETAEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool {
	// match: (SETBmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETAmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETAmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETAE (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (InvertFlags x))
	// cond:
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
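// The TEST-of-a-single-bit patterns above become explicit bit tests: TESTing
// y against 1<<x (or against a power-of-two constant) asks whether bit x of
// y is zero, and BT copies exactly that bit into the carry flag. The
// zero-flag test therefore flips into a carry test: SETEQ becomes SETAE
// (CF == 0), and the SETNE rules below become SETB (CF == 1). Illustrative
// reading:
//
//	// y&(1<<x) == 0  <=>  bit x of y is 0  <=>  CF == 0 after BT x, y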
func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
	// match: (SETEQ (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETEQmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETEQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
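// The Flag* operands encode comparisons whose outcome is already known; the
// suffix names the signed result before the underscore and the unsigned
// result after it (FlagLT_ULT is "less than signed, less than unsigned", and
// so on). Each SETcc of such a flag therefore folds straight to MOVLconst 0
// or 1: the signed conditions (G, GE, L, LE) read the first half, the
// unsigned ones (A, AE, B, BE) the second.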
func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool {
	// match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETLEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool {
	// match: (SETGmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool {
	// match: (SETL (InvertFlags x))
	// cond:
	// result: (SETG x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool {
	// match: (SETLE (InvertFlags x))
	// cond:
	// result: (SETGE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool {
	// match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETGEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool {
	// match: (SETLmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETB (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETB (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETB (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (InvertFlags x))
	// cond:
	// result: (SETNE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
	// match: (SETNE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETNE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
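// Illustrative sketch (hand-written, not generated): the SETNE rules above
// turn a TEST against a power-of-two mask into a single bit-test (guarded
// by !config.nacl), and fold SETNE of a known flag state into a constant.
// In source terms:
func bitIsSet(x uint64, k uint) bool {
	// SETNE (TESTQ (SHLQ (MOVQconst [1]) k) x) => SETB (BTQ k x):
	// building the mask, ANDing and comparing collapse into one BT.
	return x&(1<<(k&63)) != 0
}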
func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETNEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLL x (MOVQconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// cond:
	// result: (SHLLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
	// match: (SHLLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHLQ x (MOVQconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// cond:
	// result: (SHLQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
	// match: (SHLQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
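// Illustrative sketch (hand-written): the variable-count SHLL/SHLQ rules
// above rely on the hardware reducing the shift count modulo the operand
// width, so a source-level mask that keeps exactly the low 5 or 6 bits
// (c & 31 == 31, c & 63 == 63) is redundant and is peeled off the count.
func shlqDropsRedundantMask(x uint64, s uint) uint64 {
	// Once (SHLQ x (ANDQconst [63] y)) fires, this compiles to a bare
	// SHLQ: the explicit &63 changes nothing the instruction observes.
	return x << (s & 63)
}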
func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool {
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool {
	// match: (SHRBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRL x (MOVQconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// cond:
	// result: (SHRLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
	// match: (SHRLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SHRQ x (MOVQconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// cond:
	// result: (SHRQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
	// match: (SHRQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool {
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool {
	// match: (SHRWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
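// Illustrative sketch (hand-written): SHRB/SHRW cannot lean on hardware
// masking the way SHRL/SHRQ do, because the count is reduced mod 32 while
// the value is only 8 or 16 bits wide. The rules above therefore demand a
// constant count below the width and fold anything at or above it to zero.
func shrwConstFold(x uint16, c uint) uint16 {
	if c&31 >= 16 {
		return 0 // (SHRW _ (MOVQconst [c])) => (MOVLconst [0])
	}
	return x >> (c & 31) // (SHRW x (MOVQconst [c])) => (SHRWconst [c&31] x)
}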
func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBL x (MOVLconst [c]))
	// cond:
	// result: (SUBLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// cond:
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool {
	// match: (SUBLconst [c] x)
	// cond: int32(c) == 0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// cond:
	// result: (ADDLconst [int64(int32(-c))] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(-c))
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool {
	// match: (SUBQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// cond:
	// result: (MOVQconst [d-c])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d - c
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(-c-d)
	// result: (ADDQconst [-c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(-c - d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c - d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool {
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool {
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
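// Note (illustrative): the SUBLmem/SUBQmem/SUBSDmem/SUBSSmem rules above
// undo a merged load when the memory operand is actually a value just
// stored from the other register file. Instead of a round trip through
// memory, the stored value is moved across register files directly
// (MOVLf2i/MOVQf2i for float stores feeding integer ops, MOVLi2f/MOVQi2f
// for the reverse) and the operation is split back into its register form.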
func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool {
	// match: (TESTB (MOVLconst [c]) x)
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTB x (MOVLconst [c]))
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
	// match: (TESTL (MOVLconst [c]) x)
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTL x (MOVLconst [c]))
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
	// match: (TESTW (MOVLconst [c]) x)
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTW x (MOVLconst [c]))
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool {
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool {
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool {
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool {
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
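// Illustrative sketch (hand-written): the XADDlock/XCHG rules fold a
// constant pointer adjustment (ADDQconst, or LEAQ with a mergeable symbol)
// into the instruction's displacement. AMD64 addressing modes carry only a
// signed 32-bit displacement, hence the guard:
func offsetsCanMerge(off1, off2 int64) bool {
	return is32Bit(off1 + off2) // same check the rules above apply
}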
func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
	// match: (XORL x (MOVLconst [c]))
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
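// Illustrative sketch (hand-written): the two shifted halves of a rotate
// occupy disjoint bits, so XOR assembles them exactly as OR would; the
// XORL rules above therefore recognize the same shift pairs as rotates
// (ROLLconst, plus ROLWconst/ROLBconst when the type is 2 or 1 bytes).
func rotlViaXor(x uint32, c uint) uint32 {
	// For 0 <= c < 32 this is a left rotation by c bits, matching
	// (XORL (SHLLconst x [c]) (SHRLconst x [32-c])) => (ROLLconst x [c]).
	return x<<c ^ x>>(32-c)
}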
func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
	// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
	// match: (XORLconst [1] (SETB x))
	// cond:
	// result: (SETAE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// cond:
	// result: (SETB x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// cond:
	// result: (SETA x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETA x))
	// cond:
	// result: (SETBE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
	// match: (XORLconst [c] (XORLconst [d] x))
	// cond:
	// result: (XORLconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (XORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
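// The XORLmem rule above handles the case where the "load" being folded
// would read back a float value that was just stored from an XMM register
// at the same address: instead of going through memory it uses MOVLf2i to
// reinterpret the float bits as an integer register value directly.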
func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
	// match: (XORQconst [c] (XORQconst [d] x))
	// cond:
	// result: (XORQconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (XORQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAdd16_0(v *Value) bool {
	// match: (Add16 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32_0(v *Value) bool {
	// match: (Add32 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
	// match: (Add32F x y)
	// cond:
	// result: (ADDSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
	// match: (Add64 x y)
	// cond:
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
	// match: (Add64F x y)
	// cond:
	// result: (ADDSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
	// match: (Add8 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AddPtr x y)
	// cond: config.PtrSize == 8
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (AddPtr x y)
	// cond: config.PtrSize == 4
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
	// match: (And16 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
	// match: (And32 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
	// match: (And64 x y)
	// cond:
	// result: (ANDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
	// match: (And8 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAndB_0(v *Value) bool {
	// match: (AndB x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd32 ptr val mem)
	// cond:
	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst32)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
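// AtomicAdd is lowered to a LOCK XADD, which returns the *old* memory
// value; the AddTupleFirst32/64 wrapper adds val back onto the first
// (value) component of the tuple so the result is the new value, matching
// the semantics of sync/atomic.AddUint32 and friends.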
func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd64 ptr val mem)
	// cond:
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst64)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
	// match: (AtomicAnd8 ptr val mem)
	// cond:
	// result: (ANDBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ANDBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
	// match: (AtomicExchange32 ptr val mem)
	// cond:
	// result: (XCHGL val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGL)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
	// match: (AtomicExchange64 ptr val mem)
	// cond:
	// result: (XCHGQ val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGQ)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
	// match: (AtomicLoad32 ptr mem)
	// cond:
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
	// match: (AtomicLoad64 ptr mem)
	// cond:
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
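// Roughly speaking, an atomic load needs no extra fencing on amd64 (the
// hardware memory model is TSO); MOVLatomicload/MOVQatomicload are plain
// loads that are merely kept ordered with respect to other memory
// operations. Atomic stores, lowered below, are the expensive direction:
// they use XCHG, whose implicit lock provides the required barrier.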
func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 8
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 4
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
	// match: (AtomicOr8 ptr val mem)
	// cond:
	// result: (ORBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ORBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore32 ptr val mem)
	// cond:
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore64 ptr val mem)
	// cond:
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 8
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 4
	// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
	// match: (Avg64u x y)
	// cond:
	// result: (AVGQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64AVGQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
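// BitLen64 below computes 1 + bsr(x), with CMOVQEQ substituting -1 when
// BSRQ's zero flag reports x == 0, so that BitLen64(0) comes out as 0.
// BitLen32 simply zero-extends and reuses the 64-bit lowering; the
// semantics mirror math/bits.Len64.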
func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen32 x)
	// cond:
	// result: (BitLen64 (MOVLQZX <typ.UInt64> x))
	for {
		x := v.Args[0]
		v.reset(OpBitLen64)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen64 <t> x)
	// cond:
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = 1
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = -1
		v0.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v5.AddArg(x)
		v4.AddArg(v5)
		v0.AddArg(v4)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
	// match: (Bswap32 x)
	// cond:
	// result: (BSWAPL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
	// match: (Bswap64 x)
	// cond:
	// result: (BSWAPQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
	// match: (ClosureCall [argwid] entry closure mem)
	// cond:
	// result: (CALLclosure [argwid] entry closure mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[2]
		entry := v.Args[0]
		closure := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64CALLclosure)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(closure)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpCom16_0(v *Value) bool {
	// match: (Com16 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom32_0(v *Value) bool {
	// match: (Com32 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom64_0(v *Value) bool {
	// match: (Com64 x)
	// cond:
	// result: (NOTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom8_0(v *Value) bool {
	// match: (Com8 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
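// Sub-32-bit constants (Const8/Const16, ConstBool) are materialized with
// MOVLconst below: a 32-bit register write suffices, and the narrow ops
// that consume these values ignore the upper bits anyway.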
func rewriteValueAMD64_OpConst16_0(v *Value) bool {
	// match: (Const16 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32_0(v *Value) bool {
	// match: (Const32 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32F_0(v *Value) bool {
	// match: (Const32F [val])
	// cond:
	// result: (MOVSSconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSSconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64_0(v *Value) bool {
	// match: (Const64 [val])
	// cond:
	// result: (MOVQconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64F_0(v *Value) bool {
	// match: (Const64F [val])
	// cond:
	// result: (MOVSDconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSDconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst8_0(v *Value) bool {
	// match: (Const8 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
	// match: (ConstBool [b])
	// cond:
	// result: (MOVLconst [b])
	for {
		b := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = b
		return true
	}
}
func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ConstNil)
	// cond: config.PtrSize == 8
	// result: (MOVQconst [0])
	for {
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ConstNil)
	// cond: config.PtrSize == 4
	// result: (MOVLconst [0])
	for {
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpConvert_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 8
	// result: (MOVQconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 4
	// result: (MOVLconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
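// BSF leaves its output undefined when the source is zero, so Ctz32 below
// ORs a constant bit 32 into the source before the 64-bit BSFQ: the extra
// bit sits above the low 32 bits and guarantees a nonzero source, making
// Ctz32(0) come out as 32 without a conditional move. Ctz64 cannot use
// that trick and instead pays for a CMOVQEQ mapping the zero case to 64.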
func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz32 x)
	// cond:
	// result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = 1 << 32
		v1.AddArg(v2)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool {
	// match: (Cvt32Fto32 x)
	// cond:
	// result: (CVTTSS2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool {
	// match: (Cvt32Fto64 x)
	// cond:
	// result: (CVTTSS2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool {
	// match: (Cvt32Fto64F x)
	// cond:
	// result: (CVTSS2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSS2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool {
	// match: (Cvt32to32F x)
	// cond:
	// result: (CVTSL2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool {
	// match: (Cvt32to64F x)
	// cond:
	// result: (CVTSL2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool {
	// match: (Cvt64Fto32 x)
	// cond:
	// result: (CVTTSD2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool {
	// match: (Cvt64Fto32F x)
	// cond:
	// result: (CVTSD2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSD2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool {
	// match: (Cvt64Fto64 x)
	// cond:
	// result: (CVTTSD2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool {
	// match: (Cvt64to32F x)
	// cond:
	// result: (CVTSQ2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool {
	// match: (Cvt64to64F x)
	// cond:
	// result: (CVTSQ2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
	// match: (Div128u xhi xlo y)
	// cond:
	// result: (DIVQU2 xhi xlo y)
	for {
		_ = v.Args[2]
		xhi := v.Args[0]
		xlo := v.Args[1]
		y := v.Args[2]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16 x y)
	// cond:
	// result: (Select0 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32 x y)
	// cond:
	// result: (Select0 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64 x y)
	// cond:
	// result: (Select0 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
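// The DIV* machine ops produce a (quotient, remainder) tuple; the Div*
// rules above take Select0. The corresponding Mod* rules elsewhere in
// this file take Select1 of the same operation, so a div/mod pair on the
// same operands can share a single hardware divide.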
func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8 x y)
	// cond:
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8u x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
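// Floating-point equality uses SETEQF rather than SETEQ: UCOMISS/UCOMISD
// report an unordered result (a NaN operand) through the parity flag, and
// SETEQF folds that in so x == NaN correctly evaluates to false.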
func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
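// Signed comparisons map to the signed condition codes
// (SETGE/SETG/SETL/SETLE) and the unsigned *U variants to the carry-based
// ones (SETAE/SETA/SETB/SETBE); both read the flags of the same CMP.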
func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool {
	// match: (GetCallerPC)
	// cond:
	// result: (LoweredGetCallerPC)
	for {
		v.reset(OpAMD64LoweredGetCallerPC)
		return true
	}
}
func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool {
	// match: (GetCallerSP)
	// cond:
	// result: (LoweredGetCallerSP)
	for {
		v.reset(OpAMD64LoweredGetCallerSP)
		return true
	}
}
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG_0(v *Value) bool {
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8U x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool {
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[1]
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
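// IsInBounds below compiles the bounds check 0 <= idx < len into a single
// unsigned compare: viewed unsigned, a negative idx becomes a huge value
// and fails SETB (CMPQ idx len) just like an index that is too large.
// IsSliceInBounds uses SETBE since idx == len is allowed when slicing.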
func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETB (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETB (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETBE (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETBE (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
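// The float orderings below have no direct "less" lowering that treats
// NaN the right way, so Leq32F/Leq64F (and Less32F/Less64F further down)
// swap the operands and test y >= x (SETGEF) or y > x (SETGF); any
// comparison involving a NaN then comes out false, as Go requires.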
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
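// (Load <t> ptr mem) below is lowered purely by the type: each rule picks
// the move whose width and register class match t, e.g. 64-bit ints become
// MOVQload while floats become MOVSS/MOVSD loads. Pointer loads depend on
// config.PtrSize, so the same rules also cover 4-byte-pointer
// configurations (as on amd64p32).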
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
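// The left-shift lowerings below implement Go's shift semantics (a count at
// least the operand width yields 0) without branches. The hardware shift
// masks its count, so the shifted value is ANDed with an SBB carrymask of a
// compare against the width bound: all ones when the count is in range, all
// zeros otherwise. A rough Go equivalent of the pattern, as a sketch only:
//
//	func lsh32(x uint32, y uint16) uint32 {
//		shifted := x << (y & 31) // SHLL: hardware masks the count
//		var mask uint32
//		if y < 32 { // CMPWconst y [32] + SBBLcarrymask
//			mask = ^uint32(0)
//		}
//		return shifted & mask // ANDL
//	}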
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
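// The 8-bit left shifts below reuse the 32-bit pattern with a bound of 32
// rather than 8: SHLL only mishandles counts of 32 and up (the hardware
// masks the count to 5 bits), and for counts between 8 and 31 the low 8
// bits of the result are already zero, so only the >= 32 case needs the
// carrymask fixup. The same reasoning applies to the 16-bit shifts above.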
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
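// Mod* is lowered through the corresponding DIV op: DIVW and friends are
// modeled as returning a (quotient, remainder) tuple, and Select1 picks out
// the remainder half. In rule notation the shape produced for Mod16 is:
//
//	(Mod16 x y) -> (Select1 (DIVW x y))
//
// The 8-bit forms first widen to 16 bits, since this op set has no separate
// 8-bit divide.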
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16 x y)
	// cond:
	// result: (Select1 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32 x y)
	// cond:
	// result: (Select1 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64 x y)
	// cond:
	// result: (Select1 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
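// Move (a memmove of constant size) is lowered in tiers. OpMove_0 below
// handles small fixed sizes with one or two plain load/store pairs;
// OpMove_10 then peels odd tails and recurses, hands mid-size aligned
// copies to Duff's device (DUFFCOPY), and falls back to REPMOVSQ for very
// large copies or when config.noDuffDevice is set. config.useSSE decides
// whether 16-byte chunks use MOVO or paired MOVQ operations.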
func rewriteValueAMD64_OpMove_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [0] _ _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[2]
		mem := v.Args[2]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// cond:
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// cond:
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// cond:
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// cond:
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: config.useSSE
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: !config.useSSE
	// result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [3] dst src mem)
	// cond:
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// cond:
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// cond:
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
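// OpMove_10 continues with the variable-size cases. For 8 < s < 16 the copy
// is done as two possibly overlapping 8-byte moves, one at offset s-8 and
// one at offset 0; for s=13, say, the stores cover bytes 5..12 and 0..7.
// The overlap is harmless because both loads read the pre-copy memory
// state. Larger sizes that are not multiples of 16 copy the s%16 tail up
// front and recurse on the remaining multiple of 16.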
func rewriteValueAMD64_OpMove_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [7] dst src mem)
	// cond:
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 8 && s < 16
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 8 && s < 16) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
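// Mul64uhilo is the widening multiply: MULQU2 models a full 64x64 product
// whose high and low 64-bit halves are later pulled apart with Select ops,
// for callers that need both halves of the result.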
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
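// The float negations above flip only the sign bit, PXORing the value with
// a -0.0 constant (f2i(math.Copysign(0, -1))), which preserves NaN payloads
// and negative zero, unlike a subtraction from zero. The float inequalities
// below use SETNEF rather than SETNE: UCOMIS reports NaN as unordered, and
// SETNEF folds that in so that x != y is true when either operand is NaN.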
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
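// OffPtr picks an add form by pointer width and offset size: an offset that
// fits in a 32-bit immediate on a 64-bit pointer becomes ADDQconst, a wider
// offset is materialized with MOVQconst first, and 4-byte pointers use
// ADDLconst. The is32Bit(off) guard is what keeps the immediate encodable.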
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
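// PopCount16 and PopCount8 zero-extend their argument into a 32-bit
// register and use POPCNTL: x86 has no 8-bit POPCNT, and widening the
// 16-bit case as well keeps the inputs out of partial registers.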
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
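// Signed right shifts cannot simply be masked to zero like the unsigned
// forms above: Go requires a count at least the width to keep shifting in
// sign bits. The Rsh*x* rules below therefore clamp the count instead,
// ORing it with NOT(SBBcarrymask(CMP y [width])), which leaves y unchanged
// when it is in range and turns it into all ones (an effective maximum
// shift) when it is not. Roughly, for the 16-bit case:
//
//	count := y
//	if y >= 16 { // CMPWconst y [16] / SBBLcarrymask / NOTL / ORL
//		count = ^uint16(0) // SARW then shifts by its maximum masked count
//	}
//	// result: x >> count, arithmetically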
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond:
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond:
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
OpAMD64SBBQcarrymask, t) 44938 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44939 v2.AuxInt = 64 44940 v2.AddArg(y) 44941 v1.AddArg(v2) 44942 v.AddArg(v1) 44943 return true 44944 } 44945 } 44946 func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool { 44947 b := v.Block 44948 _ = b 44949 // match: (Rsh64Ux8 <t> x y) 44950 // cond: 44951 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 44952 for { 44953 t := v.Type 44954 _ = v.Args[1] 44955 x := v.Args[0] 44956 y := v.Args[1] 44957 v.reset(OpAMD64ANDQ) 44958 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 44959 v0.AddArg(x) 44960 v0.AddArg(y) 44961 v.AddArg(v0) 44962 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 44963 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 44964 v2.AuxInt = 64 44965 v2.AddArg(y) 44966 v1.AddArg(v2) 44967 v.AddArg(v1) 44968 return true 44969 } 44970 } 44971 func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool { 44972 b := v.Block 44973 _ = b 44974 // match: (Rsh64x16 <t> x y) 44975 // cond: 44976 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 44977 for { 44978 t := v.Type 44979 _ = v.Args[1] 44980 x := v.Args[0] 44981 y := v.Args[1] 44982 v.reset(OpAMD64SARQ) 44983 v.Type = t 44984 v.AddArg(x) 44985 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 44986 v0.AddArg(y) 44987 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 44988 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 44989 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 44990 v3.AuxInt = 64 44991 v3.AddArg(y) 44992 v2.AddArg(v3) 44993 v1.AddArg(v2) 44994 v0.AddArg(v1) 44995 v.AddArg(v0) 44996 return true 44997 } 44998 } 44999 func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool { 45000 b := v.Block 45001 _ = b 45002 // match: (Rsh64x32 <t> x y) 45003 // cond: 45004 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 45005 for { 45006 t := v.Type 45007 _ = v.Args[1] 45008 x := v.Args[0] 45009 y := v.Args[1] 45010 v.reset(OpAMD64SARQ) 45011 v.Type = t 45012 v.AddArg(x) 45013 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 45014 v0.AddArg(y) 45015 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 45016 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 45017 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 45018 v3.AuxInt = 64 45019 v3.AddArg(y) 45020 v2.AddArg(v3) 45021 v1.AddArg(v2) 45022 v0.AddArg(v1) 45023 v.AddArg(v0) 45024 return true 45025 } 45026 } 45027 func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool { 45028 b := v.Block 45029 _ = b 45030 // match: (Rsh64x64 <t> x y) 45031 // cond: 45032 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 45033 for { 45034 t := v.Type 45035 _ = v.Args[1] 45036 x := v.Args[0] 45037 y := v.Args[1] 45038 v.reset(OpAMD64SARQ) 45039 v.Type = t 45040 v.AddArg(x) 45041 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 45042 v0.AddArg(y) 45043 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 45044 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 45045 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 45046 v3.AuxInt = 64 45047 v3.AddArg(y) 45048 v2.AddArg(v3) 45049 v1.AddArg(v2) 45050 v0.AddArg(v1) 45051 v.AddArg(v0) 45052 return true 45053 } 45054 } 45055 func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool { 45056 b := v.Block 45057 _ = b 45058 // match: (Rsh64x8 <t> x y) 45059 // cond: 45060 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 45061 for { 45062 t 
:= v.Type 45063 _ = v.Args[1] 45064 x := v.Args[0] 45065 y := v.Args[1] 45066 v.reset(OpAMD64SARQ) 45067 v.Type = t 45068 v.AddArg(x) 45069 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 45070 v0.AddArg(y) 45071 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 45072 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 45073 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 45074 v3.AuxInt = 64 45075 v3.AddArg(y) 45076 v2.AddArg(v3) 45077 v1.AddArg(v2) 45078 v0.AddArg(v1) 45079 v.AddArg(v0) 45080 return true 45081 } 45082 } 45083 func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool { 45084 b := v.Block 45085 _ = b 45086 // match: (Rsh8Ux16 <t> x y) 45087 // cond: 45088 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 45089 for { 45090 t := v.Type 45091 _ = v.Args[1] 45092 x := v.Args[0] 45093 y := v.Args[1] 45094 v.reset(OpAMD64ANDL) 45095 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 45096 v0.AddArg(x) 45097 v0.AddArg(y) 45098 v.AddArg(v0) 45099 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45100 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 45101 v2.AuxInt = 8 45102 v2.AddArg(y) 45103 v1.AddArg(v2) 45104 v.AddArg(v1) 45105 return true 45106 } 45107 } 45108 func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool { 45109 b := v.Block 45110 _ = b 45111 // match: (Rsh8Ux32 <t> x y) 45112 // cond: 45113 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 45114 for { 45115 t := v.Type 45116 _ = v.Args[1] 45117 x := v.Args[0] 45118 y := v.Args[1] 45119 v.reset(OpAMD64ANDL) 45120 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 45121 v0.AddArg(x) 45122 v0.AddArg(y) 45123 v.AddArg(v0) 45124 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45125 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 45126 v2.AuxInt = 8 45127 v2.AddArg(y) 45128 v1.AddArg(v2) 45129 v.AddArg(v1) 45130 return true 45131 } 45132 } 45133 func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool { 45134 b := v.Block 45135 _ = b 45136 // match: (Rsh8Ux64 <t> x y) 45137 // cond: 45138 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 45139 for { 45140 t := v.Type 45141 _ = v.Args[1] 45142 x := v.Args[0] 45143 y := v.Args[1] 45144 v.reset(OpAMD64ANDL) 45145 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 45146 v0.AddArg(x) 45147 v0.AddArg(y) 45148 v.AddArg(v0) 45149 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45150 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 45151 v2.AuxInt = 8 45152 v2.AddArg(y) 45153 v1.AddArg(v2) 45154 v.AddArg(v1) 45155 return true 45156 } 45157 } 45158 func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool { 45159 b := v.Block 45160 _ = b 45161 // match: (Rsh8Ux8 <t> x y) 45162 // cond: 45163 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 45164 for { 45165 t := v.Type 45166 _ = v.Args[1] 45167 x := v.Args[0] 45168 y := v.Args[1] 45169 v.reset(OpAMD64ANDL) 45170 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 45171 v0.AddArg(x) 45172 v0.AddArg(y) 45173 v.AddArg(v0) 45174 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45175 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 45176 v2.AuxInt = 8 45177 v2.AddArg(y) 45178 v1.AddArg(v2) 45179 v.AddArg(v1) 45180 return true 45181 } 45182 } 45183 func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool { 45184 b := v.Block 45185 _ = b 45186 // match: (Rsh8x16 <t> x y) 45187 // cond: 45188 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 45189 for { 45190 t := v.Type 45191 _ = v.Args[1] 45192 x := v.Args[0] 45193 y 
:= v.Args[1] 45194 v.reset(OpAMD64SARB) 45195 v.Type = t 45196 v.AddArg(x) 45197 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 45198 v0.AddArg(y) 45199 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 45200 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 45201 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 45202 v3.AuxInt = 8 45203 v3.AddArg(y) 45204 v2.AddArg(v3) 45205 v1.AddArg(v2) 45206 v0.AddArg(v1) 45207 v.AddArg(v0) 45208 return true 45209 } 45210 } 45211 func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool { 45212 b := v.Block 45213 _ = b 45214 // match: (Rsh8x32 <t> x y) 45215 // cond: 45216 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 45217 for { 45218 t := v.Type 45219 _ = v.Args[1] 45220 x := v.Args[0] 45221 y := v.Args[1] 45222 v.reset(OpAMD64SARB) 45223 v.Type = t 45224 v.AddArg(x) 45225 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 45226 v0.AddArg(y) 45227 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 45228 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 45229 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 45230 v3.AuxInt = 8 45231 v3.AddArg(y) 45232 v2.AddArg(v3) 45233 v1.AddArg(v2) 45234 v0.AddArg(v1) 45235 v.AddArg(v0) 45236 return true 45237 } 45238 } 45239 func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool { 45240 b := v.Block 45241 _ = b 45242 // match: (Rsh8x64 <t> x y) 45243 // cond: 45244 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 45245 for { 45246 t := v.Type 45247 _ = v.Args[1] 45248 x := v.Args[0] 45249 y := v.Args[1] 45250 v.reset(OpAMD64SARB) 45251 v.Type = t 45252 v.AddArg(x) 45253 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 45254 v0.AddArg(y) 45255 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 45256 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 45257 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 45258 v3.AuxInt = 8 45259 v3.AddArg(y) 45260 v2.AddArg(v3) 45261 v1.AddArg(v2) 45262 v0.AddArg(v1) 45263 v.AddArg(v0) 45264 return true 45265 } 45266 } 45267 func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { 45268 b := v.Block 45269 _ = b 45270 // match: (Rsh8x8 <t> x y) 45271 // cond: 45272 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 45273 for { 45274 t := v.Type 45275 _ = v.Args[1] 45276 x := v.Args[0] 45277 y := v.Args[1] 45278 v.reset(OpAMD64SARB) 45279 v.Type = t 45280 v.AddArg(x) 45281 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 45282 v0.AddArg(y) 45283 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 45284 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 45285 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 45286 v3.AuxInt = 8 45287 v3.AddArg(y) 45288 v2.AddArg(v3) 45289 v1.AddArg(v2) 45290 v0.AddArg(v1) 45291 v.AddArg(v0) 45292 return true 45293 } 45294 } 45295 func rewriteValueAMD64_OpSelect0_0(v *Value) bool { 45296 b := v.Block 45297 _ = b 45298 // match: (Select0 <t> (AddTupleFirst32 val tuple)) 45299 // cond: 45300 // result: (ADDL val (Select0 <t> tuple)) 45301 for { 45302 t := v.Type 45303 v_0 := v.Args[0] 45304 if v_0.Op != OpAMD64AddTupleFirst32 { 45305 break 45306 } 45307 _ = v_0.Args[1] 45308 val := v_0.Args[0] 45309 tuple := v_0.Args[1] 45310 v.reset(OpAMD64ADDL) 45311 v.AddArg(val) 45312 v0 := b.NewValue0(v.Pos, OpSelect0, t) 45313 v0.AddArg(tuple) 45314 v.AddArg(v0) 45315 return true 45316 } 45317 // match: (Select0 <t> (AddTupleFirst64 val tuple)) 45318 // cond: 45319 // result: (ADDQ val (Select0 <t> tuple)) 
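// AddTupleFirst32/64 are marker ops produced when lowering the atomic
// add operations: XADDLlock/XADDQlock return the old memory value, so
// the addition of val is replayed on the Select0 (value) result here,
// while Select1 (the memory result) passes through untouched.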
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// cond:
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// cond:
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool {
	// match: (SignExt16to32 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool {
	// match: (SignExt16to64 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool {
	// match: (SignExt32to64 x)
	// cond:
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool {
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool {
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool {
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
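// Slicemask must produce all zeros when the length is zero and all
// ones otherwise. NEGQ x is negative exactly when x > 0 (lengths are
// never negative), so arithmetically shifting the negation right by
// 63 broadcasts that sign bit into a full-width mask.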
func rewriteValueAMD64_OpSlicemask_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Slicemask <t> x)
	// cond:
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64SARQconst)
		v.AuxInt = 63
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSqrt_0(v *Value) bool {
	// match: (Sqrt x)
	// cond:
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
	// match: (StaticCall [argwid] {target} mem)
	// cond:
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
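// Generic stores are dispatched purely on the size recorded in the
// type aux and on whether the stored value is a float; the float
// cases are listed first so that 8- and 4-byte floats reach
// MOVSDstore/MOVSSstore before the same-size integer rules match.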
func rewriteValueAMD64_OpStore_0(v *Value) bool {
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSub16_0(v *Value) bool {
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32_0(v *Value) bool {
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32F_0(v *Value) bool {
	// match: (Sub32F x y)
	// cond:
	// result: (SUBSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64_0(v *Value) bool {
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64F_0(v *Value) bool {
	// match: (Sub64F x y)
	// cond:
	// result: (SUBSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub8_0(v *Value) bool {
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SubPtr x y)
	// cond: config.PtrSize == 8
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SubPtr x y)
	// cond: config.PtrSize == 4
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
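// Truncations are free on AMD64: the low bits of a wider register
// already hold the narrow value, so each Trunc* rule simply copies
// its argument.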
func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool {
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool {
	// match: (Trunc32to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool {
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool {
	// match: (Trunc64to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool {
	// match: (Trunc64to32 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool {
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpXor16_0(v *Value) bool {
	// match: (Xor16 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor32_0(v *Value) bool {
	// match: (Xor32 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor64_0(v *Value) bool {
	// match: (Xor64 x y)
	// cond:
	// result: (XORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor8_0(v *Value) bool {
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
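// Zeroing is tiered by size. Blocks of up to 8 bytes (and the odd
// tails of larger blocks) use one or two constant stores; without
// SSE, multiples of 8 up to 32 bytes chain MOVQstoreconst ops; with
// SSE, 16..64-byte blocks store a zeroed O register. OpZero_20 below
// finishes the job with Duff's device for 16-byte multiples up to
// 1KB and REP STOSQ for everything larger (or whenever Duff's device
// is disabled).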
func rewriteValueAMD64_OpZero_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [0] _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[1]
		mem := v.Args[1]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// cond:
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// cond:
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// cond:
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// cond:
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// cond:
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// cond:
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZero_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, s-8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore destptr (MOVOconst [0]) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(destptr)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v3.AuxInt = 0
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
	for {
		if v.AuxInt != 48 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 32
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 16
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v5.AddArg(destptr)
		v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v6.AuxInt = 0
		v5.AddArg(v6)
		v5.AddArg(mem)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
	for {
		if v.AuxInt != 64 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 48
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 32
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v6.AuxInt = 16
		v6.AddArg(destptr)
		v5.AddArg(v6)
		v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v7.AuxInt = 0
		v5.AddArg(v7)
		v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v8.AddArg(destptr)
		v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v9.AuxInt = 0
		v8.AddArg(v9)
		v8.AddArg(mem)
		v5.AddArg(v8)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZero_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = s
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = 0
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool {
	// match: (ZeroExt16to32 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool {
	// match: (ZeroExt16to64 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool {
	// match: (ZeroExt32to64 x)
	// cond:
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool {
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool {
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool {
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
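// rewriteBlockAMD64 is the block-level analogue of rewriteValueAMD64:
// instead of rewriting a value in place it retargets a block's kind
// and control value. The EQ/NE cases turn single-bit TEST patterns
// into BT instructions, generic If blocks are redirected onto the
// flags feeding their SETcc control, and comparisons whose flags are
// statically known collapse into unconditional First blocks, with the
// successors swapped when the condition is known false.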
func rewriteBlockAMD64(b *Block) bool {
	config := b.Func.Config
	_ = config
	fe := b.Func.fe
	_ = fe
	typ := &config.Types
	_ = typ
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (UGE (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (InvertFlags cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETL {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETLE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETG cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETG {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQ {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETB cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETB {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETBE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETA cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETA {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETAE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
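		// The floating-point SET ops branch on unsigned conditions
		// because UCOMISS/UCOMISD set the flags the way an unsigned
		// integer compare would; equality and inequality get their
		// own EQF/NEF block kinds, which also consult the parity flag
		// so that comparisons involving NaN take the right branch.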
		// match: (If (SETGF cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If cond yes no)
		// cond:
		// result: (NE (TESTB cond cond) yes no)
		for {
			v := b.Control
			_ = v
			cond := b.Control
			b.Kind = BlockAMD64NE
			v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
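		// An NE block testing a materialized boolean (TESTB of a
		// SETcc against itself) can branch on the original flags
		// directly. Each rule below appears twice, presumably because
		// TESTB is commutative and the rule generator expands both
		// argument orders, even though they match identically here.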
b.SetControl(cmp) 47234 b.Aux = nil 47235 return true 47236 } 47237 // match: (LT (FlagEQ) yes no) 47238 // cond: 47239 // result: (First nil no yes) 47240 for { 47241 v := b.Control 47242 if v.Op != OpAMD64FlagEQ { 47243 break 47244 } 47245 b.Kind = BlockFirst 47246 b.SetControl(nil) 47247 b.Aux = nil 47248 b.swapSuccessors() 47249 return true 47250 } 47251 // match: (LT (FlagLT_ULT) yes no) 47252 // cond: 47253 // result: (First nil yes no) 47254 for { 47255 v := b.Control 47256 if v.Op != OpAMD64FlagLT_ULT { 47257 break 47258 } 47259 b.Kind = BlockFirst 47260 b.SetControl(nil) 47261 b.Aux = nil 47262 return true 47263 } 47264 // match: (LT (FlagLT_UGT) yes no) 47265 // cond: 47266 // result: (First nil yes no) 47267 for { 47268 v := b.Control 47269 if v.Op != OpAMD64FlagLT_UGT { 47270 break 47271 } 47272 b.Kind = BlockFirst 47273 b.SetControl(nil) 47274 b.Aux = nil 47275 return true 47276 } 47277 // match: (LT (FlagGT_ULT) yes no) 47278 // cond: 47279 // result: (First nil no yes) 47280 for { 47281 v := b.Control 47282 if v.Op != OpAMD64FlagGT_ULT { 47283 break 47284 } 47285 b.Kind = BlockFirst 47286 b.SetControl(nil) 47287 b.Aux = nil 47288 b.swapSuccessors() 47289 return true 47290 } 47291 // match: (LT (FlagGT_UGT) yes no) 47292 // cond: 47293 // result: (First nil no yes) 47294 for { 47295 v := b.Control 47296 if v.Op != OpAMD64FlagGT_UGT { 47297 break 47298 } 47299 b.Kind = BlockFirst 47300 b.SetControl(nil) 47301 b.Aux = nil 47302 b.swapSuccessors() 47303 return true 47304 } 47305 case BlockAMD64NE: 47306 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 47307 // cond: 47308 // result: (LT cmp yes no) 47309 for { 47310 v := b.Control 47311 if v.Op != OpAMD64TESTB { 47312 break 47313 } 47314 _ = v.Args[1] 47315 v_0 := v.Args[0] 47316 if v_0.Op != OpAMD64SETL { 47317 break 47318 } 47319 cmp := v_0.Args[0] 47320 v_1 := v.Args[1] 47321 if v_1.Op != OpAMD64SETL { 47322 break 47323 } 47324 if cmp != v_1.Args[0] { 47325 break 47326 } 47327 b.Kind = BlockAMD64LT 47328 b.SetControl(cmp) 47329 b.Aux = nil 47330 return true 47331 } 47332 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 47333 // cond: 47334 // result: (LT cmp yes no) 47335 for { 47336 v := b.Control 47337 if v.Op != OpAMD64TESTB { 47338 break 47339 } 47340 _ = v.Args[1] 47341 v_0 := v.Args[0] 47342 if v_0.Op != OpAMD64SETL { 47343 break 47344 } 47345 cmp := v_0.Args[0] 47346 v_1 := v.Args[1] 47347 if v_1.Op != OpAMD64SETL { 47348 break 47349 } 47350 if cmp != v_1.Args[0] { 47351 break 47352 } 47353 b.Kind = BlockAMD64LT 47354 b.SetControl(cmp) 47355 b.Aux = nil 47356 return true 47357 } 47358 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 47359 // cond: 47360 // result: (LE cmp yes no) 47361 for { 47362 v := b.Control 47363 if v.Op != OpAMD64TESTB { 47364 break 47365 } 47366 _ = v.Args[1] 47367 v_0 := v.Args[0] 47368 if v_0.Op != OpAMD64SETLE { 47369 break 47370 } 47371 cmp := v_0.Args[0] 47372 v_1 := v.Args[1] 47373 if v_1.Op != OpAMD64SETLE { 47374 break 47375 } 47376 if cmp != v_1.Args[0] { 47377 break 47378 } 47379 b.Kind = BlockAMD64LE 47380 b.SetControl(cmp) 47381 b.Aux = nil 47382 return true 47383 } 47384 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 47385 // cond: 47386 // result: (LE cmp yes no) 47387 for { 47388 v := b.Control 47389 if v.Op != OpAMD64TESTB { 47390 break 47391 } 47392 _ = v.Args[1] 47393 v_0 := v.Args[0] 47394 if v_0.Op != OpAMD64SETLE { 47395 break 47396 } 47397 cmp := v_0.Args[0] 47398 v_1 := v.Args[1] 47399 if v_1.Op != OpAMD64SETLE { 47400 break 47401 } 47402 if cmp != 
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
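		// The next group rewrites single-bit tests into BT instructions:
		// (TESTQ (SHLQ (MOVQconst [1]) x) y) asks whether bit x of y is
		// set, and BTQ x y copies exactly that bit into CF, so the branch
		// becomes ULT ("jump if CF set"). The !config.nacl conditions keep
		// these rewrites disabled when targeting NaCl, where the BT forms
		// are not used.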
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (ULT (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
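		// Worked example: for a mask c = 8, isPowerOfTwo(8) holds and
		// log2(8) = 3, so (NE (TESTQconst [8] x)) becomes
		// (ULT (BTQconst [3] x)): test bit 3 of x and branch on CF.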
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
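	// InvertFlags records that a comparison's operands were swapped, so a
	// block consuming it is rewritten to the mirrored condition (for the
	// unsigned blocks below: UGE <-> ULE, UGT <-> ULT). The Flag* controls
	// (FlagEQ, FlagLT_ULT, ...) are statically known comparison outcomes
	// encoding both the signed and unsigned result; a branch on one
	// collapses to BlockFirst, with swapSuccessors() when the "no" edge is
	// the one that must be taken.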
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	}
	return false
}
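// Note: rewriteBlockAMD64 reports whether it changed the block. The SSA
// rewrite pass is expected to call it (together with rewriteValueAMD64)
// repeatedly until no rule fires, so each rule only needs to make one step
// of progress rather than fully normalize the block in a single pass.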