// github.com/goproxy0/go@v0.0.0-20171111080102-49cc0c489d2c/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDLconstmem:
		return rewriteValueAMD64_OpAMD64ADDLconstmem_0(v)
	case OpAMD64ADDLmem:
		return rewriteValueAMD64_OpAMD64ADDLmem_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDQconstmem:
		return rewriteValueAMD64_OpAMD64ADDQconstmem_0(v)
	case OpAMD64ADDQmem:
		return rewriteValueAMD64_OpAMD64ADDQmem_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDmem:
		return rewriteValueAMD64_OpAMD64ADDSDmem_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSmem:
		return rewriteValueAMD64_OpAMD64ADDSSmem_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLmem:
		return rewriteValueAMD64_OpAMD64ANDLmem_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQmem:
		return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDmem:
		return rewriteValueAMD64_OpAMD64MULSDmem_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSmem:
		return rewriteValueAMD64_OpAMD64MULSSmem_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLmem:
		return rewriteValueAMD64_OpAMD64ORLmem_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQmem:
		return rewriteValueAMD64_OpAMD64ORQmem_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETAEmem:
		return rewriteValueAMD64_OpAMD64SETAEmem_0(v)
	case OpAMD64SETAmem:
		return rewriteValueAMD64_OpAMD64SETAmem_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETBEmem:
		return rewriteValueAMD64_OpAMD64SETBEmem_0(v)
	case OpAMD64SETBmem:
		return rewriteValueAMD64_OpAMD64SETBmem_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETEQmem:
		return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETGEmem:
		return rewriteValueAMD64_OpAMD64SETGEmem_0(v)
	case OpAMD64SETGmem:
		return rewriteValueAMD64_OpAMD64SETGmem_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETLEmem:
		return rewriteValueAMD64_OpAMD64SETLEmem_0(v)
	case OpAMD64SETLmem:
		return rewriteValueAMD64_OpAMD64SETLmem_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SETNEmem:
		return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLmem:
		return rewriteValueAMD64_OpAMD64SUBLmem_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQmem:
		return rewriteValueAMD64_OpAMD64SUBQmem_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDmem:
		return rewriteValueAMD64_OpAMD64SUBSDmem_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSmem:
		return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLmem:
		return rewriteValueAMD64_OpAMD64XORLmem_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQmem:
		return rewriteValueAMD64_OpAMD64XORQmem_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpCeil:
		return rewriteValueAMD64_OpCeil_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetCallerPC:
		return rewriteValueAMD64_OpGetCallerPC_0(v)
	case OpGetCallerSP:
		return rewriteValueAMD64_OpGetCallerSP_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpWB:
		return rewriteValueAMD64_OpWB_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
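// The functions below are generated from pattern-rewrite rules, one chunk of
// up to ten rules per function (hence the _0/_10/_20 suffixes chained with ||
// in the dispatch switch above); each matcher returns true exactly when a
// rule fired. As a minimal sketch of the source syntax, assuming the
// gen/AMD64.rules grammar of this era ("(match) && cond -> (result)"), the
// first ADDL rewrite below would come from a rule shaped like:
//
//	(ADDL x (MOVLconst [c])) -> (ADDLconst [c] x)
//
// i.e. match the value tree, check the optional condition, and replace the
// value in place with the result tree via v.reset plus v.AddArg.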
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
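// A worked instance of the rotate recognition above: with c=24 and d=8 we
// have d == 32-c, so for a 32-bit x the shifted halves occupy disjoint bits
// and the ADDL cannot carry; the pair collapses to one rotate:
//
//	(x << 24) + (x >> 8) == bits.RotateLeft32(x, 24)
//
// which is why (ADDL (SHLLconst x [c]) (SHRLconst x [d])) becomes a single
// ROLLconst. The 16- and 8-bit variants add the t.Size() guard because the
// narrow SHRW/SHRB results live inside 32-bit registers.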
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
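// The two ADDL_10 cases above fold a load into the arithmetic op. Read off
// the match/cond/result comments, the rule has the shape
//
//	(ADDL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l)
//		-> (ADDLmem x [off] {sym} ptr mem)
//
// turning a MOVL+ADDL pair into a single add with a memory operand.
// canMergeLoad checks that the load l is safe to absorb here (roughly: it is
// only used by v and no memory write can intervene), and clobber always
// reports true while resetting l to an invalid op so dead-code elimination
// removes it.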
func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
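// The ADDQ rules above strength-reduce add/shift combinations into LEAQ
// forms: x + (y<<3) becomes (LEAQ8 x y), x + y + c becomes (LEAQ1 [c] x y),
// and x + y + y becomes (LEAQ2 x y). In assembly terms (a sketch, register
// names arbitrary), the LEAQ8 case is a single instruction that also leaves
// the flags untouched:
//
//	LEAQ (AX)(BX*8), CX   // CX = AX + BX*8
//
// The x.Op != OpSB guards appear to keep the SB static-base pseudo-register
// out of operand positions the scaled-index addressing mode cannot encode.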
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
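// ADDQconstmem and ADDQmem above perform a form of store-to-load forwarding
// across the integer/float register files: when the memory operand was just
// written by a MOVSDstore to the same ptr/off/sym, the stored value is still
// live in an XMM register, so the rewrite reads it back with MOVQf2i (a
// bit-for-bit reinterpretation, not a numeric conversion) instead of going
// through memory. The ADDSDmem/ADDSSmem rules below are the mirror image,
// using MOVQi2f/MOVLi2f to pull an integer store back into the float domain.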
:= l.Args[0] 2174 mem := l.Args[1] 2175 x := v.Args[1] 2176 if !(canMergeLoad(v, l, x) && clobber(l)) { 2177 break 2178 } 2179 v.reset(OpAMD64ADDSDmem) 2180 v.AuxInt = off 2181 v.Aux = sym 2182 v.AddArg(x) 2183 v.AddArg(ptr) 2184 v.AddArg(mem) 2185 return true 2186 } 2187 return false 2188 } 2189 func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool { 2190 b := v.Block 2191 _ = b 2192 typ := &b.Func.Config.Types 2193 _ = typ 2194 // match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 2195 // cond: 2196 // result: (ADDSD x (MOVQi2f y)) 2197 for { 2198 off := v.AuxInt 2199 sym := v.Aux 2200 _ = v.Args[2] 2201 x := v.Args[0] 2202 ptr := v.Args[1] 2203 v_2 := v.Args[2] 2204 if v_2.Op != OpAMD64MOVQstore { 2205 break 2206 } 2207 if v_2.AuxInt != off { 2208 break 2209 } 2210 if v_2.Aux != sym { 2211 break 2212 } 2213 _ = v_2.Args[2] 2214 if ptr != v_2.Args[0] { 2215 break 2216 } 2217 y := v_2.Args[1] 2218 v.reset(OpAMD64ADDSD) 2219 v.AddArg(x) 2220 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 2221 v0.AddArg(y) 2222 v.AddArg(v0) 2223 return true 2224 } 2225 return false 2226 } 2227 func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { 2228 // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) 2229 // cond: canMergeLoad(v, l, x) && clobber(l) 2230 // result: (ADDSSmem x [off] {sym} ptr mem) 2231 for { 2232 _ = v.Args[1] 2233 x := v.Args[0] 2234 l := v.Args[1] 2235 if l.Op != OpAMD64MOVSSload { 2236 break 2237 } 2238 off := l.AuxInt 2239 sym := l.Aux 2240 _ = l.Args[1] 2241 ptr := l.Args[0] 2242 mem := l.Args[1] 2243 if !(canMergeLoad(v, l, x) && clobber(l)) { 2244 break 2245 } 2246 v.reset(OpAMD64ADDSSmem) 2247 v.AuxInt = off 2248 v.Aux = sym 2249 v.AddArg(x) 2250 v.AddArg(ptr) 2251 v.AddArg(mem) 2252 return true 2253 } 2254 // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) 2255 // cond: canMergeLoad(v, l, x) && clobber(l) 2256 // result: (ADDSSmem x [off] {sym} ptr mem) 2257 for { 2258 _ = v.Args[1] 2259 l := v.Args[0] 2260 if l.Op != OpAMD64MOVSSload { 2261 break 2262 } 2263 off := l.AuxInt 2264 sym := l.Aux 2265 _ = l.Args[1] 2266 ptr := l.Args[0] 2267 mem := l.Args[1] 2268 x := v.Args[1] 2269 if !(canMergeLoad(v, l, x) && clobber(l)) { 2270 break 2271 } 2272 v.reset(OpAMD64ADDSSmem) 2273 v.AuxInt = off 2274 v.Aux = sym 2275 v.AddArg(x) 2276 v.AddArg(ptr) 2277 v.AddArg(mem) 2278 return true 2279 } 2280 return false 2281 } 2282 func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool { 2283 b := v.Block 2284 _ = b 2285 typ := &b.Func.Config.Types 2286 _ = typ 2287 // match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 2288 // cond: 2289 // result: (ADDSS x (MOVLi2f y)) 2290 for { 2291 off := v.AuxInt 2292 sym := v.Aux 2293 _ = v.Args[2] 2294 x := v.Args[0] 2295 ptr := v.Args[1] 2296 v_2 := v.Args[2] 2297 if v_2.Op != OpAMD64MOVLstore { 2298 break 2299 } 2300 if v_2.AuxInt != off { 2301 break 2302 } 2303 if v_2.Aux != sym { 2304 break 2305 } 2306 _ = v_2.Args[2] 2307 if ptr != v_2.Args[0] { 2308 break 2309 } 2310 y := v_2.Args[1] 2311 v.reset(OpAMD64ADDSS) 2312 v.AddArg(x) 2313 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 2314 v0.AddArg(y) 2315 v.AddArg(v0) 2316 return true 2317 } 2318 return false 2319 } 2320 func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { 2321 // match: (ANDL x (MOVLconst [c])) 2322 // cond: 2323 // result: (ANDLconst [c] x) 2324 for { 2325 _ = v.Args[1] 2326 x := v.Args[0] 2327 v_1 := v.Args[1] 2328 if v_1.Op != OpAMD64MOVLconst { 2329 break 2330 } 2331 c := v_1.AuxInt 2332 v.reset(OpAMD64ANDLconst) 2333 v.AuxInt = 
c 2334 v.AddArg(x) 2335 return true 2336 } 2337 // match: (ANDL (MOVLconst [c]) x) 2338 // cond: 2339 // result: (ANDLconst [c] x) 2340 for { 2341 _ = v.Args[1] 2342 v_0 := v.Args[0] 2343 if v_0.Op != OpAMD64MOVLconst { 2344 break 2345 } 2346 c := v_0.AuxInt 2347 x := v.Args[1] 2348 v.reset(OpAMD64ANDLconst) 2349 v.AuxInt = c 2350 v.AddArg(x) 2351 return true 2352 } 2353 // match: (ANDL x x) 2354 // cond: 2355 // result: x 2356 for { 2357 _ = v.Args[1] 2358 x := v.Args[0] 2359 if x != v.Args[1] { 2360 break 2361 } 2362 v.reset(OpCopy) 2363 v.Type = x.Type 2364 v.AddArg(x) 2365 return true 2366 } 2367 // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) 2368 // cond: canMergeLoad(v, l, x) && clobber(l) 2369 // result: (ANDLmem x [off] {sym} ptr mem) 2370 for { 2371 _ = v.Args[1] 2372 x := v.Args[0] 2373 l := v.Args[1] 2374 if l.Op != OpAMD64MOVLload { 2375 break 2376 } 2377 off := l.AuxInt 2378 sym := l.Aux 2379 _ = l.Args[1] 2380 ptr := l.Args[0] 2381 mem := l.Args[1] 2382 if !(canMergeLoad(v, l, x) && clobber(l)) { 2383 break 2384 } 2385 v.reset(OpAMD64ANDLmem) 2386 v.AuxInt = off 2387 v.Aux = sym 2388 v.AddArg(x) 2389 v.AddArg(ptr) 2390 v.AddArg(mem) 2391 return true 2392 } 2393 // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) 2394 // cond: canMergeLoad(v, l, x) && clobber(l) 2395 // result: (ANDLmem x [off] {sym} ptr mem) 2396 for { 2397 _ = v.Args[1] 2398 l := v.Args[0] 2399 if l.Op != OpAMD64MOVLload { 2400 break 2401 } 2402 off := l.AuxInt 2403 sym := l.Aux 2404 _ = l.Args[1] 2405 ptr := l.Args[0] 2406 mem := l.Args[1] 2407 x := v.Args[1] 2408 if !(canMergeLoad(v, l, x) && clobber(l)) { 2409 break 2410 } 2411 v.reset(OpAMD64ANDLmem) 2412 v.AuxInt = off 2413 v.Aux = sym 2414 v.AddArg(x) 2415 v.AddArg(ptr) 2416 v.AddArg(mem) 2417 return true 2418 } 2419 return false 2420 } 2421 func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool { 2422 // match: (ANDLconst [c] (ANDLconst [d] x)) 2423 // cond: 2424 // result: (ANDLconst [c & d] x) 2425 for { 2426 c := v.AuxInt 2427 v_0 := v.Args[0] 2428 if v_0.Op != OpAMD64ANDLconst { 2429 break 2430 } 2431 d := v_0.AuxInt 2432 x := v_0.Args[0] 2433 v.reset(OpAMD64ANDLconst) 2434 v.AuxInt = c & d 2435 v.AddArg(x) 2436 return true 2437 } 2438 // match: (ANDLconst [0xFF] x) 2439 // cond: 2440 // result: (MOVBQZX x) 2441 for { 2442 if v.AuxInt != 0xFF { 2443 break 2444 } 2445 x := v.Args[0] 2446 v.reset(OpAMD64MOVBQZX) 2447 v.AddArg(x) 2448 return true 2449 } 2450 // match: (ANDLconst [0xFFFF] x) 2451 // cond: 2452 // result: (MOVWQZX x) 2453 for { 2454 if v.AuxInt != 0xFFFF { 2455 break 2456 } 2457 x := v.Args[0] 2458 v.reset(OpAMD64MOVWQZX) 2459 v.AddArg(x) 2460 return true 2461 } 2462 // match: (ANDLconst [c] _) 2463 // cond: int32(c)==0 2464 // result: (MOVLconst [0]) 2465 for { 2466 c := v.AuxInt 2467 if !(int32(c) == 0) { 2468 break 2469 } 2470 v.reset(OpAMD64MOVLconst) 2471 v.AuxInt = 0 2472 return true 2473 } 2474 // match: (ANDLconst [c] x) 2475 // cond: int32(c)==-1 2476 // result: x 2477 for { 2478 c := v.AuxInt 2479 x := v.Args[0] 2480 if !(int32(c) == -1) { 2481 break 2482 } 2483 v.reset(OpCopy) 2484 v.Type = x.Type 2485 v.AddArg(x) 2486 return true 2487 } 2488 // match: (ANDLconst [c] (MOVLconst [d])) 2489 // cond: 2490 // result: (MOVLconst [c&d]) 2491 for { 2492 c := v.AuxInt 2493 v_0 := v.Args[0] 2494 if v_0.Op != OpAMD64MOVLconst { 2495 break 2496 } 2497 d := v_0.AuxInt 2498 v.reset(OpAMD64MOVLconst) 2499 v.AuxInt = c & d 2500 return true 2501 } 2502 return false 2503 } 2504 func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool { 
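	// Note (a sketch, not generated output): ANDLmem is the load-folded form of ANDL.
	// The rule below un-folds it when the loaded word was just stored from an XMM
	// register: rather than forwarding a float store into an integer load, the bits
	// are moved across register files directly. Schematically, with hypothetical
	// operands p, s, y:
	//
	//	(ANDLmem x [8] {s} p (MOVSSstore [8] {s} p y _)) -> (ANDL x (MOVLf2i y))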
2505 b := v.Block 2506 _ = b 2507 typ := &b.Func.Config.Types 2508 _ = typ 2509 // match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 2510 // cond: 2511 // result: (ANDL x (MOVLf2i y)) 2512 for { 2513 off := v.AuxInt 2514 sym := v.Aux 2515 _ = v.Args[2] 2516 x := v.Args[0] 2517 ptr := v.Args[1] 2518 v_2 := v.Args[2] 2519 if v_2.Op != OpAMD64MOVSSstore { 2520 break 2521 } 2522 if v_2.AuxInt != off { 2523 break 2524 } 2525 if v_2.Aux != sym { 2526 break 2527 } 2528 _ = v_2.Args[2] 2529 if ptr != v_2.Args[0] { 2530 break 2531 } 2532 y := v_2.Args[1] 2533 v.reset(OpAMD64ANDL) 2534 v.AddArg(x) 2535 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 2536 v0.AddArg(y) 2537 v.AddArg(v0) 2538 return true 2539 } 2540 return false 2541 } 2542 func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { 2543 // match: (ANDQ x (MOVQconst [c])) 2544 // cond: is32Bit(c) 2545 // result: (ANDQconst [c] x) 2546 for { 2547 _ = v.Args[1] 2548 x := v.Args[0] 2549 v_1 := v.Args[1] 2550 if v_1.Op != OpAMD64MOVQconst { 2551 break 2552 } 2553 c := v_1.AuxInt 2554 if !(is32Bit(c)) { 2555 break 2556 } 2557 v.reset(OpAMD64ANDQconst) 2558 v.AuxInt = c 2559 v.AddArg(x) 2560 return true 2561 } 2562 // match: (ANDQ (MOVQconst [c]) x) 2563 // cond: is32Bit(c) 2564 // result: (ANDQconst [c] x) 2565 for { 2566 _ = v.Args[1] 2567 v_0 := v.Args[0] 2568 if v_0.Op != OpAMD64MOVQconst { 2569 break 2570 } 2571 c := v_0.AuxInt 2572 x := v.Args[1] 2573 if !(is32Bit(c)) { 2574 break 2575 } 2576 v.reset(OpAMD64ANDQconst) 2577 v.AuxInt = c 2578 v.AddArg(x) 2579 return true 2580 } 2581 // match: (ANDQ x x) 2582 // cond: 2583 // result: x 2584 for { 2585 _ = v.Args[1] 2586 x := v.Args[0] 2587 if x != v.Args[1] { 2588 break 2589 } 2590 v.reset(OpCopy) 2591 v.Type = x.Type 2592 v.AddArg(x) 2593 return true 2594 } 2595 // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem)) 2596 // cond: canMergeLoad(v, l, x) && clobber(l) 2597 // result: (ANDQmem x [off] {sym} ptr mem) 2598 for { 2599 _ = v.Args[1] 2600 x := v.Args[0] 2601 l := v.Args[1] 2602 if l.Op != OpAMD64MOVQload { 2603 break 2604 } 2605 off := l.AuxInt 2606 sym := l.Aux 2607 _ = l.Args[1] 2608 ptr := l.Args[0] 2609 mem := l.Args[1] 2610 if !(canMergeLoad(v, l, x) && clobber(l)) { 2611 break 2612 } 2613 v.reset(OpAMD64ANDQmem) 2614 v.AuxInt = off 2615 v.Aux = sym 2616 v.AddArg(x) 2617 v.AddArg(ptr) 2618 v.AddArg(mem) 2619 return true 2620 } 2621 // match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x) 2622 // cond: canMergeLoad(v, l, x) && clobber(l) 2623 // result: (ANDQmem x [off] {sym} ptr mem) 2624 for { 2625 _ = v.Args[1] 2626 l := v.Args[0] 2627 if l.Op != OpAMD64MOVQload { 2628 break 2629 } 2630 off := l.AuxInt 2631 sym := l.Aux 2632 _ = l.Args[1] 2633 ptr := l.Args[0] 2634 mem := l.Args[1] 2635 x := v.Args[1] 2636 if !(canMergeLoad(v, l, x) && clobber(l)) { 2637 break 2638 } 2639 v.reset(OpAMD64ANDQmem) 2640 v.AuxInt = off 2641 v.Aux = sym 2642 v.AddArg(x) 2643 v.AddArg(ptr) 2644 v.AddArg(mem) 2645 return true 2646 } 2647 return false 2648 } 2649 func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool { 2650 // match: (ANDQconst [c] (ANDQconst [d] x)) 2651 // cond: 2652 // result: (ANDQconst [c & d] x) 2653 for { 2654 c := v.AuxInt 2655 v_0 := v.Args[0] 2656 if v_0.Op != OpAMD64ANDQconst { 2657 break 2658 } 2659 d := v_0.AuxInt 2660 x := v_0.Args[0] 2661 v.reset(OpAMD64ANDQconst) 2662 v.AuxInt = c & d 2663 v.AddArg(x) 2664 return true 2665 } 2666 // match: (ANDQconst [0xFF] x) 2667 // cond: 2668 // result: (MOVBQZX x) 2669 for { 2670 if v.AuxInt != 0xFF { 2671 break 2672 
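		// AuxInt is not the 0xFF mask; fall through to the remaining ANDQconst
		// rules below (0xFFFF, 0xFFFFFFFF, 0, -1, and constant folding against
		// MOVQconst).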
} 2673 x := v.Args[0] 2674 v.reset(OpAMD64MOVBQZX) 2675 v.AddArg(x) 2676 return true 2677 } 2678 // match: (ANDQconst [0xFFFF] x) 2679 // cond: 2680 // result: (MOVWQZX x) 2681 for { 2682 if v.AuxInt != 0xFFFF { 2683 break 2684 } 2685 x := v.Args[0] 2686 v.reset(OpAMD64MOVWQZX) 2687 v.AddArg(x) 2688 return true 2689 } 2690 // match: (ANDQconst [0xFFFFFFFF] x) 2691 // cond: 2692 // result: (MOVLQZX x) 2693 for { 2694 if v.AuxInt != 0xFFFFFFFF { 2695 break 2696 } 2697 x := v.Args[0] 2698 v.reset(OpAMD64MOVLQZX) 2699 v.AddArg(x) 2700 return true 2701 } 2702 // match: (ANDQconst [0] _) 2703 // cond: 2704 // result: (MOVQconst [0]) 2705 for { 2706 if v.AuxInt != 0 { 2707 break 2708 } 2709 v.reset(OpAMD64MOVQconst) 2710 v.AuxInt = 0 2711 return true 2712 } 2713 // match: (ANDQconst [-1] x) 2714 // cond: 2715 // result: x 2716 for { 2717 if v.AuxInt != -1 { 2718 break 2719 } 2720 x := v.Args[0] 2721 v.reset(OpCopy) 2722 v.Type = x.Type 2723 v.AddArg(x) 2724 return true 2725 } 2726 // match: (ANDQconst [c] (MOVQconst [d])) 2727 // cond: 2728 // result: (MOVQconst [c&d]) 2729 for { 2730 c := v.AuxInt 2731 v_0 := v.Args[0] 2732 if v_0.Op != OpAMD64MOVQconst { 2733 break 2734 } 2735 d := v_0.AuxInt 2736 v.reset(OpAMD64MOVQconst) 2737 v.AuxInt = c & d 2738 return true 2739 } 2740 return false 2741 } 2742 func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool { 2743 b := v.Block 2744 _ = b 2745 typ := &b.Func.Config.Types 2746 _ = typ 2747 // match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 2748 // cond: 2749 // result: (ANDQ x (MOVQf2i y)) 2750 for { 2751 off := v.AuxInt 2752 sym := v.Aux 2753 _ = v.Args[2] 2754 x := v.Args[0] 2755 ptr := v.Args[1] 2756 v_2 := v.Args[2] 2757 if v_2.Op != OpAMD64MOVSDstore { 2758 break 2759 } 2760 if v_2.AuxInt != off { 2761 break 2762 } 2763 if v_2.Aux != sym { 2764 break 2765 } 2766 _ = v_2.Args[2] 2767 if ptr != v_2.Args[0] { 2768 break 2769 } 2770 y := v_2.Args[1] 2771 v.reset(OpAMD64ANDQ) 2772 v.AddArg(x) 2773 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 2774 v0.AddArg(y) 2775 v.AddArg(v0) 2776 return true 2777 } 2778 return false 2779 } 2780 func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool { 2781 b := v.Block 2782 _ = b 2783 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) 2784 // cond: 2785 // result: (BSFQ (ORQconst <t> [1<<8] x)) 2786 for { 2787 v_0 := v.Args[0] 2788 if v_0.Op != OpAMD64ORQconst { 2789 break 2790 } 2791 t := v_0.Type 2792 if v_0.AuxInt != 1<<8 { 2793 break 2794 } 2795 v_0_0 := v_0.Args[0] 2796 if v_0_0.Op != OpAMD64MOVBQZX { 2797 break 2798 } 2799 x := v_0_0.Args[0] 2800 v.reset(OpAMD64BSFQ) 2801 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) 2802 v0.AuxInt = 1 << 8 2803 v0.AddArg(x) 2804 v.AddArg(v0) 2805 return true 2806 } 2807 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) 2808 // cond: 2809 // result: (BSFQ (ORQconst <t> [1<<16] x)) 2810 for { 2811 v_0 := v.Args[0] 2812 if v_0.Op != OpAMD64ORQconst { 2813 break 2814 } 2815 t := v_0.Type 2816 if v_0.AuxInt != 1<<16 { 2817 break 2818 } 2819 v_0_0 := v_0.Args[0] 2820 if v_0_0.Op != OpAMD64MOVWQZX { 2821 break 2822 } 2823 x := v_0_0.Args[0] 2824 v.reset(OpAMD64BSFQ) 2825 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) 2826 v0.AuxInt = 1 << 16 2827 v0.AddArg(x) 2828 v.AddArg(v0) 2829 return true 2830 } 2831 return false 2832 } 2833 func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool { 2834 // match: (BTQconst [c] x) 2835 // cond: c < 32 2836 // result: (BTLconst [c] x) 2837 for { 2838 c := v.AuxInt 2839 x := v.Args[0] 2840 if !(c < 32) { 2841 break 2842 } 
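		// At this point c < 32 is known, so the tested bit lies in the low 32
		// bits and BTLconst reaches it; the 32-bit form typically encodes
		// shorter (no REX.W prefix).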
2843 v.reset(OpAMD64BTLconst) 2844 v.AuxInt = c 2845 v.AddArg(x) 2846 return true 2847 } 2848 return false 2849 } 2850 func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool { 2851 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) 2852 // cond: c != 0 2853 // result: x 2854 for { 2855 _ = v.Args[2] 2856 x := v.Args[0] 2857 v_2 := v.Args[2] 2858 if v_2.Op != OpSelect1 { 2859 break 2860 } 2861 v_2_0 := v_2.Args[0] 2862 if v_2_0.Op != OpAMD64BSFQ { 2863 break 2864 } 2865 v_2_0_0 := v_2_0.Args[0] 2866 if v_2_0_0.Op != OpAMD64ORQconst { 2867 break 2868 } 2869 c := v_2_0_0.AuxInt 2870 if !(c != 0) { 2871 break 2872 } 2873 v.reset(OpCopy) 2874 v.Type = x.Type 2875 v.AddArg(x) 2876 return true 2877 } 2878 return false 2879 } 2880 func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { 2881 b := v.Block 2882 _ = b 2883 // match: (CMPB x (MOVLconst [c])) 2884 // cond: 2885 // result: (CMPBconst x [int64(int8(c))]) 2886 for { 2887 _ = v.Args[1] 2888 x := v.Args[0] 2889 v_1 := v.Args[1] 2890 if v_1.Op != OpAMD64MOVLconst { 2891 break 2892 } 2893 c := v_1.AuxInt 2894 v.reset(OpAMD64CMPBconst) 2895 v.AuxInt = int64(int8(c)) 2896 v.AddArg(x) 2897 return true 2898 } 2899 // match: (CMPB (MOVLconst [c]) x) 2900 // cond: 2901 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 2902 for { 2903 _ = v.Args[1] 2904 v_0 := v.Args[0] 2905 if v_0.Op != OpAMD64MOVLconst { 2906 break 2907 } 2908 c := v_0.AuxInt 2909 x := v.Args[1] 2910 v.reset(OpAMD64InvertFlags) 2911 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 2912 v0.AuxInt = int64(int8(c)) 2913 v0.AddArg(x) 2914 v.AddArg(v0) 2915 return true 2916 } 2917 return false 2918 } 2919 func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool { 2920 // match: (CMPBconst (MOVLconst [x]) [y]) 2921 // cond: int8(x)==int8(y) 2922 // result: (FlagEQ) 2923 for { 2924 y := v.AuxInt 2925 v_0 := v.Args[0] 2926 if v_0.Op != OpAMD64MOVLconst { 2927 break 2928 } 2929 x := v_0.AuxInt 2930 if !(int8(x) == int8(y)) { 2931 break 2932 } 2933 v.reset(OpAMD64FlagEQ) 2934 return true 2935 } 2936 // match: (CMPBconst (MOVLconst [x]) [y]) 2937 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 2938 // result: (FlagLT_ULT) 2939 for { 2940 y := v.AuxInt 2941 v_0 := v.Args[0] 2942 if v_0.Op != OpAMD64MOVLconst { 2943 break 2944 } 2945 x := v_0.AuxInt 2946 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 2947 break 2948 } 2949 v.reset(OpAMD64FlagLT_ULT) 2950 return true 2951 } 2952 // match: (CMPBconst (MOVLconst [x]) [y]) 2953 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 2954 // result: (FlagLT_UGT) 2955 for { 2956 y := v.AuxInt 2957 v_0 := v.Args[0] 2958 if v_0.Op != OpAMD64MOVLconst { 2959 break 2960 } 2961 x := v_0.AuxInt 2962 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 2963 break 2964 } 2965 v.reset(OpAMD64FlagLT_UGT) 2966 return true 2967 } 2968 // match: (CMPBconst (MOVLconst [x]) [y]) 2969 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 2970 // result: (FlagGT_ULT) 2971 for { 2972 y := v.AuxInt 2973 v_0 := v.Args[0] 2974 if v_0.Op != OpAMD64MOVLconst { 2975 break 2976 } 2977 x := v_0.AuxInt 2978 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 2979 break 2980 } 2981 v.reset(OpAMD64FlagGT_ULT) 2982 return true 2983 } 2984 // match: (CMPBconst (MOVLconst [x]) [y]) 2985 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 2986 // result: (FlagGT_UGT) 2987 for { 2988 y := v.AuxInt 2989 v_0 := v.Args[0] 2990 if v_0.Op != OpAMD64MOVLconst { 2991 break 2992 } 2993 x := v_0.AuxInt 2994 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 2995 break 2996 } 2997 
v.reset(OpAMD64FlagGT_UGT) 2998 return true 2999 } 3000 // match: (CMPBconst (ANDLconst _ [m]) [n]) 3001 // cond: 0 <= int8(m) && int8(m) < int8(n) 3002 // result: (FlagLT_ULT) 3003 for { 3004 n := v.AuxInt 3005 v_0 := v.Args[0] 3006 if v_0.Op != OpAMD64ANDLconst { 3007 break 3008 } 3009 m := v_0.AuxInt 3010 if !(0 <= int8(m) && int8(m) < int8(n)) { 3011 break 3012 } 3013 v.reset(OpAMD64FlagLT_ULT) 3014 return true 3015 } 3016 // match: (CMPBconst (ANDL x y) [0]) 3017 // cond: 3018 // result: (TESTB x y) 3019 for { 3020 if v.AuxInt != 0 { 3021 break 3022 } 3023 v_0 := v.Args[0] 3024 if v_0.Op != OpAMD64ANDL { 3025 break 3026 } 3027 _ = v_0.Args[1] 3028 x := v_0.Args[0] 3029 y := v_0.Args[1] 3030 v.reset(OpAMD64TESTB) 3031 v.AddArg(x) 3032 v.AddArg(y) 3033 return true 3034 } 3035 // match: (CMPBconst (ANDLconst [c] x) [0]) 3036 // cond: 3037 // result: (TESTBconst [int64(int8(c))] x) 3038 for { 3039 if v.AuxInt != 0 { 3040 break 3041 } 3042 v_0 := v.Args[0] 3043 if v_0.Op != OpAMD64ANDLconst { 3044 break 3045 } 3046 c := v_0.AuxInt 3047 x := v_0.Args[0] 3048 v.reset(OpAMD64TESTBconst) 3049 v.AuxInt = int64(int8(c)) 3050 v.AddArg(x) 3051 return true 3052 } 3053 // match: (CMPBconst x [0]) 3054 // cond: 3055 // result: (TESTB x x) 3056 for { 3057 if v.AuxInt != 0 { 3058 break 3059 } 3060 x := v.Args[0] 3061 v.reset(OpAMD64TESTB) 3062 v.AddArg(x) 3063 v.AddArg(x) 3064 return true 3065 } 3066 return false 3067 } 3068 func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { 3069 b := v.Block 3070 _ = b 3071 // match: (CMPL x (MOVLconst [c])) 3072 // cond: 3073 // result: (CMPLconst x [c]) 3074 for { 3075 _ = v.Args[1] 3076 x := v.Args[0] 3077 v_1 := v.Args[1] 3078 if v_1.Op != OpAMD64MOVLconst { 3079 break 3080 } 3081 c := v_1.AuxInt 3082 v.reset(OpAMD64CMPLconst) 3083 v.AuxInt = c 3084 v.AddArg(x) 3085 return true 3086 } 3087 // match: (CMPL (MOVLconst [c]) x) 3088 // cond: 3089 // result: (InvertFlags (CMPLconst x [c])) 3090 for { 3091 _ = v.Args[1] 3092 v_0 := v.Args[0] 3093 if v_0.Op != OpAMD64MOVLconst { 3094 break 3095 } 3096 c := v_0.AuxInt 3097 x := v.Args[1] 3098 v.reset(OpAMD64InvertFlags) 3099 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 3100 v0.AuxInt = c 3101 v0.AddArg(x) 3102 v.AddArg(v0) 3103 return true 3104 } 3105 return false 3106 } 3107 func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool { 3108 // match: (CMPLconst (MOVLconst [x]) [y]) 3109 // cond: int32(x)==int32(y) 3110 // result: (FlagEQ) 3111 for { 3112 y := v.AuxInt 3113 v_0 := v.Args[0] 3114 if v_0.Op != OpAMD64MOVLconst { 3115 break 3116 } 3117 x := v_0.AuxInt 3118 if !(int32(x) == int32(y)) { 3119 break 3120 } 3121 v.reset(OpAMD64FlagEQ) 3122 return true 3123 } 3124 // match: (CMPLconst (MOVLconst [x]) [y]) 3125 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 3126 // result: (FlagLT_ULT) 3127 for { 3128 y := v.AuxInt 3129 v_0 := v.Args[0] 3130 if v_0.Op != OpAMD64MOVLconst { 3131 break 3132 } 3133 x := v_0.AuxInt 3134 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 3135 break 3136 } 3137 v.reset(OpAMD64FlagLT_ULT) 3138 return true 3139 } 3140 // match: (CMPLconst (MOVLconst [x]) [y]) 3141 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 3142 // result: (FlagLT_UGT) 3143 for { 3144 y := v.AuxInt 3145 v_0 := v.Args[0] 3146 if v_0.Op != OpAMD64MOVLconst { 3147 break 3148 } 3149 x := v_0.AuxInt 3150 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 3151 break 3152 } 3153 v.reset(OpAMD64FlagLT_UGT) 3154 return true 3155 } 3156 // match: (CMPLconst (MOVLconst [x]) [y]) 3157 // cond: 
int32(x)>int32(y) && uint32(x)<uint32(y) 3158 // result: (FlagGT_ULT) 3159 for { 3160 y := v.AuxInt 3161 v_0 := v.Args[0] 3162 if v_0.Op != OpAMD64MOVLconst { 3163 break 3164 } 3165 x := v_0.AuxInt 3166 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 3167 break 3168 } 3169 v.reset(OpAMD64FlagGT_ULT) 3170 return true 3171 } 3172 // match: (CMPLconst (MOVLconst [x]) [y]) 3173 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 3174 // result: (FlagGT_UGT) 3175 for { 3176 y := v.AuxInt 3177 v_0 := v.Args[0] 3178 if v_0.Op != OpAMD64MOVLconst { 3179 break 3180 } 3181 x := v_0.AuxInt 3182 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 3183 break 3184 } 3185 v.reset(OpAMD64FlagGT_UGT) 3186 return true 3187 } 3188 // match: (CMPLconst (SHRLconst _ [c]) [n]) 3189 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 3190 // result: (FlagLT_ULT) 3191 for { 3192 n := v.AuxInt 3193 v_0 := v.Args[0] 3194 if v_0.Op != OpAMD64SHRLconst { 3195 break 3196 } 3197 c := v_0.AuxInt 3198 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 3199 break 3200 } 3201 v.reset(OpAMD64FlagLT_ULT) 3202 return true 3203 } 3204 // match: (CMPLconst (ANDLconst _ [m]) [n]) 3205 // cond: 0 <= int32(m) && int32(m) < int32(n) 3206 // result: (FlagLT_ULT) 3207 for { 3208 n := v.AuxInt 3209 v_0 := v.Args[0] 3210 if v_0.Op != OpAMD64ANDLconst { 3211 break 3212 } 3213 m := v_0.AuxInt 3214 if !(0 <= int32(m) && int32(m) < int32(n)) { 3215 break 3216 } 3217 v.reset(OpAMD64FlagLT_ULT) 3218 return true 3219 } 3220 // match: (CMPLconst (ANDL x y) [0]) 3221 // cond: 3222 // result: (TESTL x y) 3223 for { 3224 if v.AuxInt != 0 { 3225 break 3226 } 3227 v_0 := v.Args[0] 3228 if v_0.Op != OpAMD64ANDL { 3229 break 3230 } 3231 _ = v_0.Args[1] 3232 x := v_0.Args[0] 3233 y := v_0.Args[1] 3234 v.reset(OpAMD64TESTL) 3235 v.AddArg(x) 3236 v.AddArg(y) 3237 return true 3238 } 3239 // match: (CMPLconst (ANDLconst [c] x) [0]) 3240 // cond: 3241 // result: (TESTLconst [c] x) 3242 for { 3243 if v.AuxInt != 0 { 3244 break 3245 } 3246 v_0 := v.Args[0] 3247 if v_0.Op != OpAMD64ANDLconst { 3248 break 3249 } 3250 c := v_0.AuxInt 3251 x := v_0.Args[0] 3252 v.reset(OpAMD64TESTLconst) 3253 v.AuxInt = c 3254 v.AddArg(x) 3255 return true 3256 } 3257 // match: (CMPLconst x [0]) 3258 // cond: 3259 // result: (TESTL x x) 3260 for { 3261 if v.AuxInt != 0 { 3262 break 3263 } 3264 x := v.Args[0] 3265 v.reset(OpAMD64TESTL) 3266 v.AddArg(x) 3267 v.AddArg(x) 3268 return true 3269 } 3270 return false 3271 } 3272 func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool { 3273 b := v.Block 3274 _ = b 3275 // match: (CMPQ x (MOVQconst [c])) 3276 // cond: is32Bit(c) 3277 // result: (CMPQconst x [c]) 3278 for { 3279 _ = v.Args[1] 3280 x := v.Args[0] 3281 v_1 := v.Args[1] 3282 if v_1.Op != OpAMD64MOVQconst { 3283 break 3284 } 3285 c := v_1.AuxInt 3286 if !(is32Bit(c)) { 3287 break 3288 } 3289 v.reset(OpAMD64CMPQconst) 3290 v.AuxInt = c 3291 v.AddArg(x) 3292 return true 3293 } 3294 // match: (CMPQ (MOVQconst [c]) x) 3295 // cond: is32Bit(c) 3296 // result: (InvertFlags (CMPQconst x [c])) 3297 for { 3298 _ = v.Args[1] 3299 v_0 := v.Args[0] 3300 if v_0.Op != OpAMD64MOVQconst { 3301 break 3302 } 3303 c := v_0.AuxInt 3304 x := v.Args[1] 3305 if !(is32Bit(c)) { 3306 break 3307 } 3308 v.reset(OpAMD64InvertFlags) 3309 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 3310 v0.AuxInt = c 3311 v0.AddArg(x) 3312 v.AddArg(v0) 3313 return true 3314 } 3315 return false 3316 } 3317 func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool { 3318 // 
match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) 3319 // cond: 3320 // result: (FlagLT_ULT) 3321 for { 3322 if v.AuxInt != 32 { 3323 break 3324 } 3325 v_0 := v.Args[0] 3326 if v_0.Op != OpAMD64NEGQ { 3327 break 3328 } 3329 v_0_0 := v_0.Args[0] 3330 if v_0_0.Op != OpAMD64ADDQconst { 3331 break 3332 } 3333 if v_0_0.AuxInt != -16 { 3334 break 3335 } 3336 v_0_0_0 := v_0_0.Args[0] 3337 if v_0_0_0.Op != OpAMD64ANDQconst { 3338 break 3339 } 3340 if v_0_0_0.AuxInt != 15 { 3341 break 3342 } 3343 v.reset(OpAMD64FlagLT_ULT) 3344 return true 3345 } 3346 // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) 3347 // cond: 3348 // result: (FlagLT_ULT) 3349 for { 3350 if v.AuxInt != 32 { 3351 break 3352 } 3353 v_0 := v.Args[0] 3354 if v_0.Op != OpAMD64NEGQ { 3355 break 3356 } 3357 v_0_0 := v_0.Args[0] 3358 if v_0_0.Op != OpAMD64ADDQconst { 3359 break 3360 } 3361 if v_0_0.AuxInt != -8 { 3362 break 3363 } 3364 v_0_0_0 := v_0_0.Args[0] 3365 if v_0_0_0.Op != OpAMD64ANDQconst { 3366 break 3367 } 3368 if v_0_0_0.AuxInt != 7 { 3369 break 3370 } 3371 v.reset(OpAMD64FlagLT_ULT) 3372 return true 3373 } 3374 // match: (CMPQconst (MOVQconst [x]) [y]) 3375 // cond: x==y 3376 // result: (FlagEQ) 3377 for { 3378 y := v.AuxInt 3379 v_0 := v.Args[0] 3380 if v_0.Op != OpAMD64MOVQconst { 3381 break 3382 } 3383 x := v_0.AuxInt 3384 if !(x == y) { 3385 break 3386 } 3387 v.reset(OpAMD64FlagEQ) 3388 return true 3389 } 3390 // match: (CMPQconst (MOVQconst [x]) [y]) 3391 // cond: x<y && uint64(x)<uint64(y) 3392 // result: (FlagLT_ULT) 3393 for { 3394 y := v.AuxInt 3395 v_0 := v.Args[0] 3396 if v_0.Op != OpAMD64MOVQconst { 3397 break 3398 } 3399 x := v_0.AuxInt 3400 if !(x < y && uint64(x) < uint64(y)) { 3401 break 3402 } 3403 v.reset(OpAMD64FlagLT_ULT) 3404 return true 3405 } 3406 // match: (CMPQconst (MOVQconst [x]) [y]) 3407 // cond: x<y && uint64(x)>uint64(y) 3408 // result: (FlagLT_UGT) 3409 for { 3410 y := v.AuxInt 3411 v_0 := v.Args[0] 3412 if v_0.Op != OpAMD64MOVQconst { 3413 break 3414 } 3415 x := v_0.AuxInt 3416 if !(x < y && uint64(x) > uint64(y)) { 3417 break 3418 } 3419 v.reset(OpAMD64FlagLT_UGT) 3420 return true 3421 } 3422 // match: (CMPQconst (MOVQconst [x]) [y]) 3423 // cond: x>y && uint64(x)<uint64(y) 3424 // result: (FlagGT_ULT) 3425 for { 3426 y := v.AuxInt 3427 v_0 := v.Args[0] 3428 if v_0.Op != OpAMD64MOVQconst { 3429 break 3430 } 3431 x := v_0.AuxInt 3432 if !(x > y && uint64(x) < uint64(y)) { 3433 break 3434 } 3435 v.reset(OpAMD64FlagGT_ULT) 3436 return true 3437 } 3438 // match: (CMPQconst (MOVQconst [x]) [y]) 3439 // cond: x>y && uint64(x)>uint64(y) 3440 // result: (FlagGT_UGT) 3441 for { 3442 y := v.AuxInt 3443 v_0 := v.Args[0] 3444 if v_0.Op != OpAMD64MOVQconst { 3445 break 3446 } 3447 x := v_0.AuxInt 3448 if !(x > y && uint64(x) > uint64(y)) { 3449 break 3450 } 3451 v.reset(OpAMD64FlagGT_UGT) 3452 return true 3453 } 3454 // match: (CMPQconst (MOVBQZX _) [c]) 3455 // cond: 0xFF < c 3456 // result: (FlagLT_ULT) 3457 for { 3458 c := v.AuxInt 3459 v_0 := v.Args[0] 3460 if v_0.Op != OpAMD64MOVBQZX { 3461 break 3462 } 3463 if !(0xFF < c) { 3464 break 3465 } 3466 v.reset(OpAMD64FlagLT_ULT) 3467 return true 3468 } 3469 // match: (CMPQconst (MOVWQZX _) [c]) 3470 // cond: 0xFFFF < c 3471 // result: (FlagLT_ULT) 3472 for { 3473 c := v.AuxInt 3474 v_0 := v.Args[0] 3475 if v_0.Op != OpAMD64MOVWQZX { 3476 break 3477 } 3478 if !(0xFFFF < c) { 3479 break 3480 } 3481 v.reset(OpAMD64FlagLT_ULT) 3482 return true 3483 } 3484 // match: (CMPQconst (MOVLQZX _) [c]) 3485 // cond: 0xFFFFFFFF 
< c 3486 // result: (FlagLT_ULT) 3487 for { 3488 c := v.AuxInt 3489 v_0 := v.Args[0] 3490 if v_0.Op != OpAMD64MOVLQZX { 3491 break 3492 } 3493 if !(0xFFFFFFFF < c) { 3494 break 3495 } 3496 v.reset(OpAMD64FlagLT_ULT) 3497 return true 3498 } 3499 return false 3500 } 3501 func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool { 3502 // match: (CMPQconst (SHRQconst _ [c]) [n]) 3503 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 3504 // result: (FlagLT_ULT) 3505 for { 3506 n := v.AuxInt 3507 v_0 := v.Args[0] 3508 if v_0.Op != OpAMD64SHRQconst { 3509 break 3510 } 3511 c := v_0.AuxInt 3512 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 3513 break 3514 } 3515 v.reset(OpAMD64FlagLT_ULT) 3516 return true 3517 } 3518 // match: (CMPQconst (ANDQconst _ [m]) [n]) 3519 // cond: 0 <= m && m < n 3520 // result: (FlagLT_ULT) 3521 for { 3522 n := v.AuxInt 3523 v_0 := v.Args[0] 3524 if v_0.Op != OpAMD64ANDQconst { 3525 break 3526 } 3527 m := v_0.AuxInt 3528 if !(0 <= m && m < n) { 3529 break 3530 } 3531 v.reset(OpAMD64FlagLT_ULT) 3532 return true 3533 } 3534 // match: (CMPQconst (ANDLconst _ [m]) [n]) 3535 // cond: 0 <= m && m < n 3536 // result: (FlagLT_ULT) 3537 for { 3538 n := v.AuxInt 3539 v_0 := v.Args[0] 3540 if v_0.Op != OpAMD64ANDLconst { 3541 break 3542 } 3543 m := v_0.AuxInt 3544 if !(0 <= m && m < n) { 3545 break 3546 } 3547 v.reset(OpAMD64FlagLT_ULT) 3548 return true 3549 } 3550 // match: (CMPQconst (ANDQ x y) [0]) 3551 // cond: 3552 // result: (TESTQ x y) 3553 for { 3554 if v.AuxInt != 0 { 3555 break 3556 } 3557 v_0 := v.Args[0] 3558 if v_0.Op != OpAMD64ANDQ { 3559 break 3560 } 3561 _ = v_0.Args[1] 3562 x := v_0.Args[0] 3563 y := v_0.Args[1] 3564 v.reset(OpAMD64TESTQ) 3565 v.AddArg(x) 3566 v.AddArg(y) 3567 return true 3568 } 3569 // match: (CMPQconst (ANDQconst [c] x) [0]) 3570 // cond: 3571 // result: (TESTQconst [c] x) 3572 for { 3573 if v.AuxInt != 0 { 3574 break 3575 } 3576 v_0 := v.Args[0] 3577 if v_0.Op != OpAMD64ANDQconst { 3578 break 3579 } 3580 c := v_0.AuxInt 3581 x := v_0.Args[0] 3582 v.reset(OpAMD64TESTQconst) 3583 v.AuxInt = c 3584 v.AddArg(x) 3585 return true 3586 } 3587 // match: (CMPQconst x [0]) 3588 // cond: 3589 // result: (TESTQ x x) 3590 for { 3591 if v.AuxInt != 0 { 3592 break 3593 } 3594 x := v.Args[0] 3595 v.reset(OpAMD64TESTQ) 3596 v.AddArg(x) 3597 v.AddArg(x) 3598 return true 3599 } 3600 return false 3601 } 3602 func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { 3603 b := v.Block 3604 _ = b 3605 // match: (CMPW x (MOVLconst [c])) 3606 // cond: 3607 // result: (CMPWconst x [int64(int16(c))]) 3608 for { 3609 _ = v.Args[1] 3610 x := v.Args[0] 3611 v_1 := v.Args[1] 3612 if v_1.Op != OpAMD64MOVLconst { 3613 break 3614 } 3615 c := v_1.AuxInt 3616 v.reset(OpAMD64CMPWconst) 3617 v.AuxInt = int64(int16(c)) 3618 v.AddArg(x) 3619 return true 3620 } 3621 // match: (CMPW (MOVLconst [c]) x) 3622 // cond: 3623 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 3624 for { 3625 _ = v.Args[1] 3626 v_0 := v.Args[0] 3627 if v_0.Op != OpAMD64MOVLconst { 3628 break 3629 } 3630 c := v_0.AuxInt 3631 x := v.Args[1] 3632 v.reset(OpAMD64InvertFlags) 3633 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 3634 v0.AuxInt = int64(int16(c)) 3635 v0.AddArg(x) 3636 v.AddArg(v0) 3637 return true 3638 } 3639 return false 3640 } 3641 func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool { 3642 // match: (CMPWconst (MOVLconst [x]) [y]) 3643 // cond: int16(x)==int16(y) 3644 // result: (FlagEQ) 3645 for { 3646 y := v.AuxInt 3647 v_0 := 
v.Args[0] 3648 if v_0.Op != OpAMD64MOVLconst { 3649 break 3650 } 3651 x := v_0.AuxInt 3652 if !(int16(x) == int16(y)) { 3653 break 3654 } 3655 v.reset(OpAMD64FlagEQ) 3656 return true 3657 } 3658 // match: (CMPWconst (MOVLconst [x]) [y]) 3659 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 3660 // result: (FlagLT_ULT) 3661 for { 3662 y := v.AuxInt 3663 v_0 := v.Args[0] 3664 if v_0.Op != OpAMD64MOVLconst { 3665 break 3666 } 3667 x := v_0.AuxInt 3668 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 3669 break 3670 } 3671 v.reset(OpAMD64FlagLT_ULT) 3672 return true 3673 } 3674 // match: (CMPWconst (MOVLconst [x]) [y]) 3675 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 3676 // result: (FlagLT_UGT) 3677 for { 3678 y := v.AuxInt 3679 v_0 := v.Args[0] 3680 if v_0.Op != OpAMD64MOVLconst { 3681 break 3682 } 3683 x := v_0.AuxInt 3684 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 3685 break 3686 } 3687 v.reset(OpAMD64FlagLT_UGT) 3688 return true 3689 } 3690 // match: (CMPWconst (MOVLconst [x]) [y]) 3691 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 3692 // result: (FlagGT_ULT) 3693 for { 3694 y := v.AuxInt 3695 v_0 := v.Args[0] 3696 if v_0.Op != OpAMD64MOVLconst { 3697 break 3698 } 3699 x := v_0.AuxInt 3700 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 3701 break 3702 } 3703 v.reset(OpAMD64FlagGT_ULT) 3704 return true 3705 } 3706 // match: (CMPWconst (MOVLconst [x]) [y]) 3707 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 3708 // result: (FlagGT_UGT) 3709 for { 3710 y := v.AuxInt 3711 v_0 := v.Args[0] 3712 if v_0.Op != OpAMD64MOVLconst { 3713 break 3714 } 3715 x := v_0.AuxInt 3716 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 3717 break 3718 } 3719 v.reset(OpAMD64FlagGT_UGT) 3720 return true 3721 } 3722 // match: (CMPWconst (ANDLconst _ [m]) [n]) 3723 // cond: 0 <= int16(m) && int16(m) < int16(n) 3724 // result: (FlagLT_ULT) 3725 for { 3726 n := v.AuxInt 3727 v_0 := v.Args[0] 3728 if v_0.Op != OpAMD64ANDLconst { 3729 break 3730 } 3731 m := v_0.AuxInt 3732 if !(0 <= int16(m) && int16(m) < int16(n)) { 3733 break 3734 } 3735 v.reset(OpAMD64FlagLT_ULT) 3736 return true 3737 } 3738 // match: (CMPWconst (ANDL x y) [0]) 3739 // cond: 3740 // result: (TESTW x y) 3741 for { 3742 if v.AuxInt != 0 { 3743 break 3744 } 3745 v_0 := v.Args[0] 3746 if v_0.Op != OpAMD64ANDL { 3747 break 3748 } 3749 _ = v_0.Args[1] 3750 x := v_0.Args[0] 3751 y := v_0.Args[1] 3752 v.reset(OpAMD64TESTW) 3753 v.AddArg(x) 3754 v.AddArg(y) 3755 return true 3756 } 3757 // match: (CMPWconst (ANDLconst [c] x) [0]) 3758 // cond: 3759 // result: (TESTWconst [int64(int16(c))] x) 3760 for { 3761 if v.AuxInt != 0 { 3762 break 3763 } 3764 v_0 := v.Args[0] 3765 if v_0.Op != OpAMD64ANDLconst { 3766 break 3767 } 3768 c := v_0.AuxInt 3769 x := v_0.Args[0] 3770 v.reset(OpAMD64TESTWconst) 3771 v.AuxInt = int64(int16(c)) 3772 v.AddArg(x) 3773 return true 3774 } 3775 // match: (CMPWconst x [0]) 3776 // cond: 3777 // result: (TESTW x x) 3778 for { 3779 if v.AuxInt != 0 { 3780 break 3781 } 3782 x := v.Args[0] 3783 v.reset(OpAMD64TESTW) 3784 v.AddArg(x) 3785 v.AddArg(x) 3786 return true 3787 } 3788 return false 3789 } 3790 func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool { 3791 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 3792 // cond: is32Bit(off1+off2) 3793 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 3794 for { 3795 off1 := v.AuxInt 3796 sym := v.Aux 3797 _ = v.Args[3] 3798 v_0 := v.Args[0] 3799 if v_0.Op != OpAMD64ADDQconst { 3800 break 3801 } 3802 off2 := 
v_0.AuxInt 3803 ptr := v_0.Args[0] 3804 old := v.Args[1] 3805 new_ := v.Args[2] 3806 mem := v.Args[3] 3807 if !(is32Bit(off1 + off2)) { 3808 break 3809 } 3810 v.reset(OpAMD64CMPXCHGLlock) 3811 v.AuxInt = off1 + off2 3812 v.Aux = sym 3813 v.AddArg(ptr) 3814 v.AddArg(old) 3815 v.AddArg(new_) 3816 v.AddArg(mem) 3817 return true 3818 } 3819 return false 3820 } 3821 func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool { 3822 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 3823 // cond: is32Bit(off1+off2) 3824 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 3825 for { 3826 off1 := v.AuxInt 3827 sym := v.Aux 3828 _ = v.Args[3] 3829 v_0 := v.Args[0] 3830 if v_0.Op != OpAMD64ADDQconst { 3831 break 3832 } 3833 off2 := v_0.AuxInt 3834 ptr := v_0.Args[0] 3835 old := v.Args[1] 3836 new_ := v.Args[2] 3837 mem := v.Args[3] 3838 if !(is32Bit(off1 + off2)) { 3839 break 3840 } 3841 v.reset(OpAMD64CMPXCHGQlock) 3842 v.AuxInt = off1 + off2 3843 v.Aux = sym 3844 v.AddArg(ptr) 3845 v.AddArg(old) 3846 v.AddArg(new_) 3847 v.AddArg(mem) 3848 return true 3849 } 3850 return false 3851 } 3852 func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool { 3853 // match: (LEAL [c] {s} (ADDLconst [d] x)) 3854 // cond: is32Bit(c+d) 3855 // result: (LEAL [c+d] {s} x) 3856 for { 3857 c := v.AuxInt 3858 s := v.Aux 3859 v_0 := v.Args[0] 3860 if v_0.Op != OpAMD64ADDLconst { 3861 break 3862 } 3863 d := v_0.AuxInt 3864 x := v_0.Args[0] 3865 if !(is32Bit(c + d)) { 3866 break 3867 } 3868 v.reset(OpAMD64LEAL) 3869 v.AuxInt = c + d 3870 v.Aux = s 3871 v.AddArg(x) 3872 return true 3873 } 3874 return false 3875 } 3876 func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool { 3877 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 3878 // cond: is32Bit(c+d) 3879 // result: (LEAQ [c+d] {s} x) 3880 for { 3881 c := v.AuxInt 3882 s := v.Aux 3883 v_0 := v.Args[0] 3884 if v_0.Op != OpAMD64ADDQconst { 3885 break 3886 } 3887 d := v_0.AuxInt 3888 x := v_0.Args[0] 3889 if !(is32Bit(c + d)) { 3890 break 3891 } 3892 v.reset(OpAMD64LEAQ) 3893 v.AuxInt = c + d 3894 v.Aux = s 3895 v.AddArg(x) 3896 return true 3897 } 3898 // match: (LEAQ [c] {s} (ADDQ x y)) 3899 // cond: x.Op != OpSB && y.Op != OpSB 3900 // result: (LEAQ1 [c] {s} x y) 3901 for { 3902 c := v.AuxInt 3903 s := v.Aux 3904 v_0 := v.Args[0] 3905 if v_0.Op != OpAMD64ADDQ { 3906 break 3907 } 3908 _ = v_0.Args[1] 3909 x := v_0.Args[0] 3910 y := v_0.Args[1] 3911 if !(x.Op != OpSB && y.Op != OpSB) { 3912 break 3913 } 3914 v.reset(OpAMD64LEAQ1) 3915 v.AuxInt = c 3916 v.Aux = s 3917 v.AddArg(x) 3918 v.AddArg(y) 3919 return true 3920 } 3921 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 3922 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3923 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 3924 for { 3925 off1 := v.AuxInt 3926 sym1 := v.Aux 3927 v_0 := v.Args[0] 3928 if v_0.Op != OpAMD64LEAQ { 3929 break 3930 } 3931 off2 := v_0.AuxInt 3932 sym2 := v_0.Aux 3933 x := v_0.Args[0] 3934 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3935 break 3936 } 3937 v.reset(OpAMD64LEAQ) 3938 v.AuxInt = off1 + off2 3939 v.Aux = mergeSym(sym1, sym2) 3940 v.AddArg(x) 3941 return true 3942 } 3943 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 3944 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3945 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3946 for { 3947 off1 := v.AuxInt 3948 sym1 := v.Aux 3949 v_0 := v.Args[0] 3950 if v_0.Op != OpAMD64LEAQ1 { 3951 break 3952 } 3953 off2 := v_0.AuxInt 3954 sym2 := v_0.Aux 3955 _ = v_0.Args[1] 3956 
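		// Note: canMergeSym permits at most one of the two symbols to be
		// non-nil, and mergeSym keeps whichever one is set; roughly (a sketch
		// of the helper defined in rewrite.go):
		//
		//	func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }
		//
		// The blank "_ = v_0.Args[1]" read above is a generator artifact: it
		// hoists the slice bounds check for the Args[0] and Args[1] accesses
		// that follow.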
x := v_0.Args[0] 3957 y := v_0.Args[1] 3958 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3959 break 3960 } 3961 v.reset(OpAMD64LEAQ1) 3962 v.AuxInt = off1 + off2 3963 v.Aux = mergeSym(sym1, sym2) 3964 v.AddArg(x) 3965 v.AddArg(y) 3966 return true 3967 } 3968 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 3969 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3970 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3971 for { 3972 off1 := v.AuxInt 3973 sym1 := v.Aux 3974 v_0 := v.Args[0] 3975 if v_0.Op != OpAMD64LEAQ2 { 3976 break 3977 } 3978 off2 := v_0.AuxInt 3979 sym2 := v_0.Aux 3980 _ = v_0.Args[1] 3981 x := v_0.Args[0] 3982 y := v_0.Args[1] 3983 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3984 break 3985 } 3986 v.reset(OpAMD64LEAQ2) 3987 v.AuxInt = off1 + off2 3988 v.Aux = mergeSym(sym1, sym2) 3989 v.AddArg(x) 3990 v.AddArg(y) 3991 return true 3992 } 3993 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 3994 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3995 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 3996 for { 3997 off1 := v.AuxInt 3998 sym1 := v.Aux 3999 v_0 := v.Args[0] 4000 if v_0.Op != OpAMD64LEAQ4 { 4001 break 4002 } 4003 off2 := v_0.AuxInt 4004 sym2 := v_0.Aux 4005 _ = v_0.Args[1] 4006 x := v_0.Args[0] 4007 y := v_0.Args[1] 4008 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4009 break 4010 } 4011 v.reset(OpAMD64LEAQ4) 4012 v.AuxInt = off1 + off2 4013 v.Aux = mergeSym(sym1, sym2) 4014 v.AddArg(x) 4015 v.AddArg(y) 4016 return true 4017 } 4018 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 4019 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4020 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 4021 for { 4022 off1 := v.AuxInt 4023 sym1 := v.Aux 4024 v_0 := v.Args[0] 4025 if v_0.Op != OpAMD64LEAQ8 { 4026 break 4027 } 4028 off2 := v_0.AuxInt 4029 sym2 := v_0.Aux 4030 _ = v_0.Args[1] 4031 x := v_0.Args[0] 4032 y := v_0.Args[1] 4033 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4034 break 4035 } 4036 v.reset(OpAMD64LEAQ8) 4037 v.AuxInt = off1 + off2 4038 v.Aux = mergeSym(sym1, sym2) 4039 v.AddArg(x) 4040 v.AddArg(y) 4041 return true 4042 } 4043 return false 4044 } 4045 func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool { 4046 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 4047 // cond: is32Bit(c+d) && x.Op != OpSB 4048 // result: (LEAQ1 [c+d] {s} x y) 4049 for { 4050 c := v.AuxInt 4051 s := v.Aux 4052 _ = v.Args[1] 4053 v_0 := v.Args[0] 4054 if v_0.Op != OpAMD64ADDQconst { 4055 break 4056 } 4057 d := v_0.AuxInt 4058 x := v_0.Args[0] 4059 y := v.Args[1] 4060 if !(is32Bit(c+d) && x.Op != OpSB) { 4061 break 4062 } 4063 v.reset(OpAMD64LEAQ1) 4064 v.AuxInt = c + d 4065 v.Aux = s 4066 v.AddArg(x) 4067 v.AddArg(y) 4068 return true 4069 } 4070 // match: (LEAQ1 [c] {s} y (ADDQconst [d] x)) 4071 // cond: is32Bit(c+d) && x.Op != OpSB 4072 // result: (LEAQ1 [c+d] {s} x y) 4073 for { 4074 c := v.AuxInt 4075 s := v.Aux 4076 _ = v.Args[1] 4077 y := v.Args[0] 4078 v_1 := v.Args[1] 4079 if v_1.Op != OpAMD64ADDQconst { 4080 break 4081 } 4082 d := v_1.AuxInt 4083 x := v_1.Args[0] 4084 if !(is32Bit(c+d) && x.Op != OpSB) { 4085 break 4086 } 4087 v.reset(OpAMD64LEAQ1) 4088 v.AuxInt = c + d 4089 v.Aux = s 4090 v.AddArg(x) 4091 v.AddArg(y) 4092 return true 4093 } 4094 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 4095 // cond: 4096 // result: (LEAQ2 [c] {s} x y) 4097 for { 4098 c := v.AuxInt 4099 s := v.Aux 4100 _ = v.Args[1] 4101 x := v.Args[0] 4102 v_1 := v.Args[1] 4103 if v_1.Op != 
OpAMD64SHLQconst { 4104 break 4105 } 4106 if v_1.AuxInt != 1 { 4107 break 4108 } 4109 y := v_1.Args[0] 4110 v.reset(OpAMD64LEAQ2) 4111 v.AuxInt = c 4112 v.Aux = s 4113 v.AddArg(x) 4114 v.AddArg(y) 4115 return true 4116 } 4117 // match: (LEAQ1 [c] {s} (SHLQconst [1] y) x) 4118 // cond: 4119 // result: (LEAQ2 [c] {s} x y) 4120 for { 4121 c := v.AuxInt 4122 s := v.Aux 4123 _ = v.Args[1] 4124 v_0 := v.Args[0] 4125 if v_0.Op != OpAMD64SHLQconst { 4126 break 4127 } 4128 if v_0.AuxInt != 1 { 4129 break 4130 } 4131 y := v_0.Args[0] 4132 x := v.Args[1] 4133 v.reset(OpAMD64LEAQ2) 4134 v.AuxInt = c 4135 v.Aux = s 4136 v.AddArg(x) 4137 v.AddArg(y) 4138 return true 4139 } 4140 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 4141 // cond: 4142 // result: (LEAQ4 [c] {s} x y) 4143 for { 4144 c := v.AuxInt 4145 s := v.Aux 4146 _ = v.Args[1] 4147 x := v.Args[0] 4148 v_1 := v.Args[1] 4149 if v_1.Op != OpAMD64SHLQconst { 4150 break 4151 } 4152 if v_1.AuxInt != 2 { 4153 break 4154 } 4155 y := v_1.Args[0] 4156 v.reset(OpAMD64LEAQ4) 4157 v.AuxInt = c 4158 v.Aux = s 4159 v.AddArg(x) 4160 v.AddArg(y) 4161 return true 4162 } 4163 // match: (LEAQ1 [c] {s} (SHLQconst [2] y) x) 4164 // cond: 4165 // result: (LEAQ4 [c] {s} x y) 4166 for { 4167 c := v.AuxInt 4168 s := v.Aux 4169 _ = v.Args[1] 4170 v_0 := v.Args[0] 4171 if v_0.Op != OpAMD64SHLQconst { 4172 break 4173 } 4174 if v_0.AuxInt != 2 { 4175 break 4176 } 4177 y := v_0.Args[0] 4178 x := v.Args[1] 4179 v.reset(OpAMD64LEAQ4) 4180 v.AuxInt = c 4181 v.Aux = s 4182 v.AddArg(x) 4183 v.AddArg(y) 4184 return true 4185 } 4186 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 4187 // cond: 4188 // result: (LEAQ8 [c] {s} x y) 4189 for { 4190 c := v.AuxInt 4191 s := v.Aux 4192 _ = v.Args[1] 4193 x := v.Args[0] 4194 v_1 := v.Args[1] 4195 if v_1.Op != OpAMD64SHLQconst { 4196 break 4197 } 4198 if v_1.AuxInt != 3 { 4199 break 4200 } 4201 y := v_1.Args[0] 4202 v.reset(OpAMD64LEAQ8) 4203 v.AuxInt = c 4204 v.Aux = s 4205 v.AddArg(x) 4206 v.AddArg(y) 4207 return true 4208 } 4209 // match: (LEAQ1 [c] {s} (SHLQconst [3] y) x) 4210 // cond: 4211 // result: (LEAQ8 [c] {s} x y) 4212 for { 4213 c := v.AuxInt 4214 s := v.Aux 4215 _ = v.Args[1] 4216 v_0 := v.Args[0] 4217 if v_0.Op != OpAMD64SHLQconst { 4218 break 4219 } 4220 if v_0.AuxInt != 3 { 4221 break 4222 } 4223 y := v_0.Args[0] 4224 x := v.Args[1] 4225 v.reset(OpAMD64LEAQ8) 4226 v.AuxInt = c 4227 v.Aux = s 4228 v.AddArg(x) 4229 v.AddArg(y) 4230 return true 4231 } 4232 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4233 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4234 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 4235 for { 4236 off1 := v.AuxInt 4237 sym1 := v.Aux 4238 _ = v.Args[1] 4239 v_0 := v.Args[0] 4240 if v_0.Op != OpAMD64LEAQ { 4241 break 4242 } 4243 off2 := v_0.AuxInt 4244 sym2 := v_0.Aux 4245 x := v_0.Args[0] 4246 y := v.Args[1] 4247 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4248 break 4249 } 4250 v.reset(OpAMD64LEAQ1) 4251 v.AuxInt = off1 + off2 4252 v.Aux = mergeSym(sym1, sym2) 4253 v.AddArg(x) 4254 v.AddArg(y) 4255 return true 4256 } 4257 // match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x)) 4258 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4259 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 4260 for { 4261 off1 := v.AuxInt 4262 sym1 := v.Aux 4263 _ = v.Args[1] 4264 y := v.Args[0] 4265 v_1 := v.Args[1] 4266 if v_1.Op != OpAMD64LEAQ { 4267 break 4268 } 4269 off2 := v_1.AuxInt 4270 sym2 := v_1.Aux 4271 x := 
v_1.Args[0] 4272 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4273 break 4274 } 4275 v.reset(OpAMD64LEAQ1) 4276 v.AuxInt = off1 + off2 4277 v.Aux = mergeSym(sym1, sym2) 4278 v.AddArg(x) 4279 v.AddArg(y) 4280 return true 4281 } 4282 return false 4283 } 4284 func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool { 4285 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 4286 // cond: is32Bit(c+d) && x.Op != OpSB 4287 // result: (LEAQ2 [c+d] {s} x y) 4288 for { 4289 c := v.AuxInt 4290 s := v.Aux 4291 _ = v.Args[1] 4292 v_0 := v.Args[0] 4293 if v_0.Op != OpAMD64ADDQconst { 4294 break 4295 } 4296 d := v_0.AuxInt 4297 x := v_0.Args[0] 4298 y := v.Args[1] 4299 if !(is32Bit(c+d) && x.Op != OpSB) { 4300 break 4301 } 4302 v.reset(OpAMD64LEAQ2) 4303 v.AuxInt = c + d 4304 v.Aux = s 4305 v.AddArg(x) 4306 v.AddArg(y) 4307 return true 4308 } 4309 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 4310 // cond: is32Bit(c+2*d) && y.Op != OpSB 4311 // result: (LEAQ2 [c+2*d] {s} x y) 4312 for { 4313 c := v.AuxInt 4314 s := v.Aux 4315 _ = v.Args[1] 4316 x := v.Args[0] 4317 v_1 := v.Args[1] 4318 if v_1.Op != OpAMD64ADDQconst { 4319 break 4320 } 4321 d := v_1.AuxInt 4322 y := v_1.Args[0] 4323 if !(is32Bit(c+2*d) && y.Op != OpSB) { 4324 break 4325 } 4326 v.reset(OpAMD64LEAQ2) 4327 v.AuxInt = c + 2*d 4328 v.Aux = s 4329 v.AddArg(x) 4330 v.AddArg(y) 4331 return true 4332 } 4333 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 4334 // cond: 4335 // result: (LEAQ4 [c] {s} x y) 4336 for { 4337 c := v.AuxInt 4338 s := v.Aux 4339 _ = v.Args[1] 4340 x := v.Args[0] 4341 v_1 := v.Args[1] 4342 if v_1.Op != OpAMD64SHLQconst { 4343 break 4344 } 4345 if v_1.AuxInt != 1 { 4346 break 4347 } 4348 y := v_1.Args[0] 4349 v.reset(OpAMD64LEAQ4) 4350 v.AuxInt = c 4351 v.Aux = s 4352 v.AddArg(x) 4353 v.AddArg(y) 4354 return true 4355 } 4356 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 4357 // cond: 4358 // result: (LEAQ8 [c] {s} x y) 4359 for { 4360 c := v.AuxInt 4361 s := v.Aux 4362 _ = v.Args[1] 4363 x := v.Args[0] 4364 v_1 := v.Args[1] 4365 if v_1.Op != OpAMD64SHLQconst { 4366 break 4367 } 4368 if v_1.AuxInt != 2 { 4369 break 4370 } 4371 y := v_1.Args[0] 4372 v.reset(OpAMD64LEAQ8) 4373 v.AuxInt = c 4374 v.Aux = s 4375 v.AddArg(x) 4376 v.AddArg(y) 4377 return true 4378 } 4379 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4380 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4381 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 4382 for { 4383 off1 := v.AuxInt 4384 sym1 := v.Aux 4385 _ = v.Args[1] 4386 v_0 := v.Args[0] 4387 if v_0.Op != OpAMD64LEAQ { 4388 break 4389 } 4390 off2 := v_0.AuxInt 4391 sym2 := v_0.Aux 4392 x := v_0.Args[0] 4393 y := v.Args[1] 4394 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4395 break 4396 } 4397 v.reset(OpAMD64LEAQ2) 4398 v.AuxInt = off1 + off2 4399 v.Aux = mergeSym(sym1, sym2) 4400 v.AddArg(x) 4401 v.AddArg(y) 4402 return true 4403 } 4404 return false 4405 } 4406 func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool { 4407 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 4408 // cond: is32Bit(c+d) && x.Op != OpSB 4409 // result: (LEAQ4 [c+d] {s} x y) 4410 for { 4411 c := v.AuxInt 4412 s := v.Aux 4413 _ = v.Args[1] 4414 v_0 := v.Args[0] 4415 if v_0.Op != OpAMD64ADDQconst { 4416 break 4417 } 4418 d := v_0.AuxInt 4419 x := v_0.Args[0] 4420 y := v.Args[1] 4421 if !(is32Bit(c+d) && x.Op != OpSB) { 4422 break 4423 } 4424 v.reset(OpAMD64LEAQ4) 4425 v.AuxInt = c + d 4426 v.Aux = s 4427 v.AddArg(x) 4428 v.AddArg(y) 4429 return true 4430 } 
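	// When the constant add sits on the index operand instead (next rule), its
	// offset is scaled by the addressing multiplier. With hypothetical values
	// c=8, d=3:
	//
	//	(LEAQ4 [8] {s} x (ADDQconst [3] y)) -> (LEAQ4 [20] {s} x y)   // 8 + 4*3
	//
	// The is32Bit guard keeps the merged displacement encodable as a signed
	// 32-bit immediate; the helper is essentially (a sketch of the definition
	// in rewrite.go):
	//
	//	func is32Bit(n int64) bool {
	//		return n == int64(int32(n))
	//	}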
4431 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 4432 // cond: is32Bit(c+4*d) && y.Op != OpSB 4433 // result: (LEAQ4 [c+4*d] {s} x y) 4434 for { 4435 c := v.AuxInt 4436 s := v.Aux 4437 _ = v.Args[1] 4438 x := v.Args[0] 4439 v_1 := v.Args[1] 4440 if v_1.Op != OpAMD64ADDQconst { 4441 break 4442 } 4443 d := v_1.AuxInt 4444 y := v_1.Args[0] 4445 if !(is32Bit(c+4*d) && y.Op != OpSB) { 4446 break 4447 } 4448 v.reset(OpAMD64LEAQ4) 4449 v.AuxInt = c + 4*d 4450 v.Aux = s 4451 v.AddArg(x) 4452 v.AddArg(y) 4453 return true 4454 } 4455 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 4456 // cond: 4457 // result: (LEAQ8 [c] {s} x y) 4458 for { 4459 c := v.AuxInt 4460 s := v.Aux 4461 _ = v.Args[1] 4462 x := v.Args[0] 4463 v_1 := v.Args[1] 4464 if v_1.Op != OpAMD64SHLQconst { 4465 break 4466 } 4467 if v_1.AuxInt != 1 { 4468 break 4469 } 4470 y := v_1.Args[0] 4471 v.reset(OpAMD64LEAQ8) 4472 v.AuxInt = c 4473 v.Aux = s 4474 v.AddArg(x) 4475 v.AddArg(y) 4476 return true 4477 } 4478 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4479 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4480 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 4481 for { 4482 off1 := v.AuxInt 4483 sym1 := v.Aux 4484 _ = v.Args[1] 4485 v_0 := v.Args[0] 4486 if v_0.Op != OpAMD64LEAQ { 4487 break 4488 } 4489 off2 := v_0.AuxInt 4490 sym2 := v_0.Aux 4491 x := v_0.Args[0] 4492 y := v.Args[1] 4493 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4494 break 4495 } 4496 v.reset(OpAMD64LEAQ4) 4497 v.AuxInt = off1 + off2 4498 v.Aux = mergeSym(sym1, sym2) 4499 v.AddArg(x) 4500 v.AddArg(y) 4501 return true 4502 } 4503 return false 4504 } 4505 func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool { 4506 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 4507 // cond: is32Bit(c+d) && x.Op != OpSB 4508 // result: (LEAQ8 [c+d] {s} x y) 4509 for { 4510 c := v.AuxInt 4511 s := v.Aux 4512 _ = v.Args[1] 4513 v_0 := v.Args[0] 4514 if v_0.Op != OpAMD64ADDQconst { 4515 break 4516 } 4517 d := v_0.AuxInt 4518 x := v_0.Args[0] 4519 y := v.Args[1] 4520 if !(is32Bit(c+d) && x.Op != OpSB) { 4521 break 4522 } 4523 v.reset(OpAMD64LEAQ8) 4524 v.AuxInt = c + d 4525 v.Aux = s 4526 v.AddArg(x) 4527 v.AddArg(y) 4528 return true 4529 } 4530 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 4531 // cond: is32Bit(c+8*d) && y.Op != OpSB 4532 // result: (LEAQ8 [c+8*d] {s} x y) 4533 for { 4534 c := v.AuxInt 4535 s := v.Aux 4536 _ = v.Args[1] 4537 x := v.Args[0] 4538 v_1 := v.Args[1] 4539 if v_1.Op != OpAMD64ADDQconst { 4540 break 4541 } 4542 d := v_1.AuxInt 4543 y := v_1.Args[0] 4544 if !(is32Bit(c+8*d) && y.Op != OpSB) { 4545 break 4546 } 4547 v.reset(OpAMD64LEAQ8) 4548 v.AuxInt = c + 8*d 4549 v.Aux = s 4550 v.AddArg(x) 4551 v.AddArg(y) 4552 return true 4553 } 4554 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4555 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4556 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 4557 for { 4558 off1 := v.AuxInt 4559 sym1 := v.Aux 4560 _ = v.Args[1] 4561 v_0 := v.Args[0] 4562 if v_0.Op != OpAMD64LEAQ { 4563 break 4564 } 4565 off2 := v_0.AuxInt 4566 sym2 := v_0.Aux 4567 x := v_0.Args[0] 4568 y := v.Args[1] 4569 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4570 break 4571 } 4572 v.reset(OpAMD64LEAQ8) 4573 v.AuxInt = off1 + off2 4574 v.Aux = mergeSym(sym1, sym2) 4575 v.AddArg(x) 4576 v.AddArg(y) 4577 return true 4578 } 4579 return false 4580 } 4581 func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { 4582 b := v.Block 4583 
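	// The MOVBQSX rules below produce an "@x.Block" result: b is rebound to
	// x.Block and the fused sign-extending load is built in the original
	// load's block, with v reduced to a Copy of it. The fusion requires
	// x.Uses == 1, so no other user still needs the un-extended value.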
_ = b 4584 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 4585 // cond: x.Uses == 1 && clobber(x) 4586 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4587 for { 4588 x := v.Args[0] 4589 if x.Op != OpAMD64MOVBload { 4590 break 4591 } 4592 off := x.AuxInt 4593 sym := x.Aux 4594 _ = x.Args[1] 4595 ptr := x.Args[0] 4596 mem := x.Args[1] 4597 if !(x.Uses == 1 && clobber(x)) { 4598 break 4599 } 4600 b = x.Block 4601 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4602 v.reset(OpCopy) 4603 v.AddArg(v0) 4604 v0.AuxInt = off 4605 v0.Aux = sym 4606 v0.AddArg(ptr) 4607 v0.AddArg(mem) 4608 return true 4609 } 4610 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) 4611 // cond: x.Uses == 1 && clobber(x) 4612 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4613 for { 4614 x := v.Args[0] 4615 if x.Op != OpAMD64MOVWload { 4616 break 4617 } 4618 off := x.AuxInt 4619 sym := x.Aux 4620 _ = x.Args[1] 4621 ptr := x.Args[0] 4622 mem := x.Args[1] 4623 if !(x.Uses == 1 && clobber(x)) { 4624 break 4625 } 4626 b = x.Block 4627 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4628 v.reset(OpCopy) 4629 v.AddArg(v0) 4630 v0.AuxInt = off 4631 v0.Aux = sym 4632 v0.AddArg(ptr) 4633 v0.AddArg(mem) 4634 return true 4635 } 4636 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) 4637 // cond: x.Uses == 1 && clobber(x) 4638 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4639 for { 4640 x := v.Args[0] 4641 if x.Op != OpAMD64MOVLload { 4642 break 4643 } 4644 off := x.AuxInt 4645 sym := x.Aux 4646 _ = x.Args[1] 4647 ptr := x.Args[0] 4648 mem := x.Args[1] 4649 if !(x.Uses == 1 && clobber(x)) { 4650 break 4651 } 4652 b = x.Block 4653 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4654 v.reset(OpCopy) 4655 v.AddArg(v0) 4656 v0.AuxInt = off 4657 v0.Aux = sym 4658 v0.AddArg(ptr) 4659 v0.AddArg(mem) 4660 return true 4661 } 4662 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) 4663 // cond: x.Uses == 1 && clobber(x) 4664 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4665 for { 4666 x := v.Args[0] 4667 if x.Op != OpAMD64MOVQload { 4668 break 4669 } 4670 off := x.AuxInt 4671 sym := x.Aux 4672 _ = x.Args[1] 4673 ptr := x.Args[0] 4674 mem := x.Args[1] 4675 if !(x.Uses == 1 && clobber(x)) { 4676 break 4677 } 4678 b = x.Block 4679 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4680 v.reset(OpCopy) 4681 v.AddArg(v0) 4682 v0.AuxInt = off 4683 v0.Aux = sym 4684 v0.AddArg(ptr) 4685 v0.AddArg(mem) 4686 return true 4687 } 4688 // match: (MOVBQSX (ANDLconst [c] x)) 4689 // cond: c & 0x80 == 0 4690 // result: (ANDLconst [c & 0x7f] x) 4691 for { 4692 v_0 := v.Args[0] 4693 if v_0.Op != OpAMD64ANDLconst { 4694 break 4695 } 4696 c := v_0.AuxInt 4697 x := v_0.Args[0] 4698 if !(c&0x80 == 0) { 4699 break 4700 } 4701 v.reset(OpAMD64ANDLconst) 4702 v.AuxInt = c & 0x7f 4703 v.AddArg(x) 4704 return true 4705 } 4706 // match: (MOVBQSX (MOVBQSX x)) 4707 // cond: 4708 // result: (MOVBQSX x) 4709 for { 4710 v_0 := v.Args[0] 4711 if v_0.Op != OpAMD64MOVBQSX { 4712 break 4713 } 4714 x := v_0.Args[0] 4715 v.reset(OpAMD64MOVBQSX) 4716 v.AddArg(x) 4717 return true 4718 } 4719 return false 4720 } 4721 func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool { 4722 // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 4723 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4724 // result: (MOVBQSX x) 4725 for { 4726 off := v.AuxInt 4727 sym := v.Aux 4728 _ = v.Args[1] 4729 ptr := v.Args[0] 4730 v_1 := v.Args[1] 4731 if v_1.Op != 
OpAMD64MOVBstore { 4732 break 4733 } 4734 off2 := v_1.AuxInt 4735 sym2 := v_1.Aux 4736 _ = v_1.Args[2] 4737 ptr2 := v_1.Args[0] 4738 x := v_1.Args[1] 4739 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4740 break 4741 } 4742 v.reset(OpAMD64MOVBQSX) 4743 v.AddArg(x) 4744 return true 4745 } 4746 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4747 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4748 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4749 for { 4750 off1 := v.AuxInt 4751 sym1 := v.Aux 4752 _ = v.Args[1] 4753 v_0 := v.Args[0] 4754 if v_0.Op != OpAMD64LEAQ { 4755 break 4756 } 4757 off2 := v_0.AuxInt 4758 sym2 := v_0.Aux 4759 base := v_0.Args[0] 4760 mem := v.Args[1] 4761 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4762 break 4763 } 4764 v.reset(OpAMD64MOVBQSXload) 4765 v.AuxInt = off1 + off2 4766 v.Aux = mergeSym(sym1, sym2) 4767 v.AddArg(base) 4768 v.AddArg(mem) 4769 return true 4770 } 4771 return false 4772 } 4773 func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { 4774 b := v.Block 4775 _ = b 4776 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 4777 // cond: x.Uses == 1 && clobber(x) 4778 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4779 for { 4780 x := v.Args[0] 4781 if x.Op != OpAMD64MOVBload { 4782 break 4783 } 4784 off := x.AuxInt 4785 sym := x.Aux 4786 _ = x.Args[1] 4787 ptr := x.Args[0] 4788 mem := x.Args[1] 4789 if !(x.Uses == 1 && clobber(x)) { 4790 break 4791 } 4792 b = x.Block 4793 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4794 v.reset(OpCopy) 4795 v.AddArg(v0) 4796 v0.AuxInt = off 4797 v0.Aux = sym 4798 v0.AddArg(ptr) 4799 v0.AddArg(mem) 4800 return true 4801 } 4802 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) 4803 // cond: x.Uses == 1 && clobber(x) 4804 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4805 for { 4806 x := v.Args[0] 4807 if x.Op != OpAMD64MOVWload { 4808 break 4809 } 4810 off := x.AuxInt 4811 sym := x.Aux 4812 _ = x.Args[1] 4813 ptr := x.Args[0] 4814 mem := x.Args[1] 4815 if !(x.Uses == 1 && clobber(x)) { 4816 break 4817 } 4818 b = x.Block 4819 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4820 v.reset(OpCopy) 4821 v.AddArg(v0) 4822 v0.AuxInt = off 4823 v0.Aux = sym 4824 v0.AddArg(ptr) 4825 v0.AddArg(mem) 4826 return true 4827 } 4828 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) 4829 // cond: x.Uses == 1 && clobber(x) 4830 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4831 for { 4832 x := v.Args[0] 4833 if x.Op != OpAMD64MOVLload { 4834 break 4835 } 4836 off := x.AuxInt 4837 sym := x.Aux 4838 _ = x.Args[1] 4839 ptr := x.Args[0] 4840 mem := x.Args[1] 4841 if !(x.Uses == 1 && clobber(x)) { 4842 break 4843 } 4844 b = x.Block 4845 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4846 v.reset(OpCopy) 4847 v.AddArg(v0) 4848 v0.AuxInt = off 4849 v0.Aux = sym 4850 v0.AddArg(ptr) 4851 v0.AddArg(mem) 4852 return true 4853 } 4854 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) 4855 // cond: x.Uses == 1 && clobber(x) 4856 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4857 for { 4858 x := v.Args[0] 4859 if x.Op != OpAMD64MOVQload { 4860 break 4861 } 4862 off := x.AuxInt 4863 sym := x.Aux 4864 _ = x.Args[1] 4865 ptr := x.Args[0] 4866 mem := x.Args[1] 4867 if !(x.Uses == 1 && clobber(x)) { 4868 break 4869 } 4870 b = x.Block 4871 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4872 v.reset(OpCopy) 4873 v.AddArg(v0) 4874 v0.AuxInt = off 4875 v0.Aux = sym 4876 v0.AddArg(ptr) 4877 v0.AddArg(mem) 4878 
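		// The replacement is a plain MOVBload: the compiler's byte load
		// assembles to a zero-extending MOVBLZX, so the explicit MOVBQZX
		// becomes redundant once the load is narrowed.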
return true 4879 } 4880 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) 4881 // cond: x.Uses == 1 && clobber(x) 4882 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 4883 for { 4884 x := v.Args[0] 4885 if x.Op != OpAMD64MOVBloadidx1 { 4886 break 4887 } 4888 off := x.AuxInt 4889 sym := x.Aux 4890 _ = x.Args[2] 4891 ptr := x.Args[0] 4892 idx := x.Args[1] 4893 mem := x.Args[2] 4894 if !(x.Uses == 1 && clobber(x)) { 4895 break 4896 } 4897 b = x.Block 4898 v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type) 4899 v.reset(OpCopy) 4900 v.AddArg(v0) 4901 v0.AuxInt = off 4902 v0.Aux = sym 4903 v0.AddArg(ptr) 4904 v0.AddArg(idx) 4905 v0.AddArg(mem) 4906 return true 4907 } 4908 // match: (MOVBQZX (ANDLconst [c] x)) 4909 // cond: 4910 // result: (ANDLconst [c & 0xff] x) 4911 for { 4912 v_0 := v.Args[0] 4913 if v_0.Op != OpAMD64ANDLconst { 4914 break 4915 } 4916 c := v_0.AuxInt 4917 x := v_0.Args[0] 4918 v.reset(OpAMD64ANDLconst) 4919 v.AuxInt = c & 0xff 4920 v.AddArg(x) 4921 return true 4922 } 4923 // match: (MOVBQZX (MOVBQZX x)) 4924 // cond: 4925 // result: (MOVBQZX x) 4926 for { 4927 v_0 := v.Args[0] 4928 if v_0.Op != OpAMD64MOVBQZX { 4929 break 4930 } 4931 x := v_0.Args[0] 4932 v.reset(OpAMD64MOVBQZX) 4933 v.AddArg(x) 4934 return true 4935 } 4936 return false 4937 } 4938 func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { 4939 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 4940 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4941 // result: (MOVBQZX x) 4942 for { 4943 off := v.AuxInt 4944 sym := v.Aux 4945 _ = v.Args[1] 4946 ptr := v.Args[0] 4947 v_1 := v.Args[1] 4948 if v_1.Op != OpAMD64MOVBstore { 4949 break 4950 } 4951 off2 := v_1.AuxInt 4952 sym2 := v_1.Aux 4953 _ = v_1.Args[2] 4954 ptr2 := v_1.Args[0] 4955 x := v_1.Args[1] 4956 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4957 break 4958 } 4959 v.reset(OpAMD64MOVBQZX) 4960 v.AddArg(x) 4961 return true 4962 } 4963 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 4964 // cond: is32Bit(off1+off2) 4965 // result: (MOVBload [off1+off2] {sym} ptr mem) 4966 for { 4967 off1 := v.AuxInt 4968 sym := v.Aux 4969 _ = v.Args[1] 4970 v_0 := v.Args[0] 4971 if v_0.Op != OpAMD64ADDQconst { 4972 break 4973 } 4974 off2 := v_0.AuxInt 4975 ptr := v_0.Args[0] 4976 mem := v.Args[1] 4977 if !(is32Bit(off1 + off2)) { 4978 break 4979 } 4980 v.reset(OpAMD64MOVBload) 4981 v.AuxInt = off1 + off2 4982 v.Aux = sym 4983 v.AddArg(ptr) 4984 v.AddArg(mem) 4985 return true 4986 } 4987 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4988 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4989 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4990 for { 4991 off1 := v.AuxInt 4992 sym1 := v.Aux 4993 _ = v.Args[1] 4994 v_0 := v.Args[0] 4995 if v_0.Op != OpAMD64LEAQ { 4996 break 4997 } 4998 off2 := v_0.AuxInt 4999 sym2 := v_0.Aux 5000 base := v_0.Args[0] 5001 mem := v.Args[1] 5002 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5003 break 5004 } 5005 v.reset(OpAMD64MOVBload) 5006 v.AuxInt = off1 + off2 5007 v.Aux = mergeSym(sym1, sym2) 5008 v.AddArg(base) 5009 v.AddArg(mem) 5010 return true 5011 } 5012 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 5013 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5014 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 5015 for { 5016 off1 := v.AuxInt 5017 sym1 := v.Aux 5018 _ = v.Args[1] 5019 v_0 := v.Args[0] 5020 if v_0.Op != OpAMD64LEAQ1 { 5021 break 
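// Worked instance (illustrative, not generated): for the ANDLconst rule above,
// with c = 0x1ff, (MOVBQZX (ANDLconst [0x1ff] x)) rewrites to
// (ANDLconst [0xff] x): masking with 0x1ff and then zero-extending the low
// byte agrees with masking by 0x1ff & 0xff = 0xff directly.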
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
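// Illustration (hedged, not generated): the first MOVBload rule above forwards
// a just-stored value instead of reloading it. A hypothetical trigger:
//
//	func roundTrip(p *byte, v byte) byte {
//		*p = v
//		return *p // rewritten to a zero-extension of v; no second memory access
//	}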
func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
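// Illustration (hedged): the four MOVBloadidx1 rules above fold an ADDQconst
// on either the pointer or the index operand into the instruction's
// displacement, e.g.
//
//	func at(b []byte, i int) byte {
//		return b[i+1] // the +1 becomes the [c+d] offset of the MOVBloadidx1
//	}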
func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
	// match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
	// cond: y.Uses == 1
	// result: (SETLmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETL {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
	// cond: y.Uses == 1
	// result: (SETLEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETLE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
	// cond: y.Uses == 1
	// result: (SETGmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETG {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
	// cond: y.Uses == 1
	// result: (SETGEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETGE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETGEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
	// cond: y.Uses == 1
	// result: (SETEQmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETEQ {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETEQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
	// cond: y.Uses == 1
	// result: (SETNEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETNE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
	// cond: y.Uses == 1
	// result: (SETBmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETB {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
	// cond: y.Uses == 1
	// result: (SETBEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETBE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
	// cond: y.Uses == 1
	// result: (SETAmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETA {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
	// cond: y.Uses == 1
	// result: (SETAEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		y := v.Args[1]
		if y.Op != OpAMD64SETAE {
			break
		}
		x := y.Args[0]
		mem := v.Args[2]
		if !(y.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
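// Illustration (hedged): the ten rules above fuse a single-use SETcc with the
// byte store of its result into one SETccmem instruction, as in:
//
//	func storeFlag(p *bool, x, y int64) {
//		*p = x < y // SETL + MOVBstore combine into SETLmem
//	}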
func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(int64(int8(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x0 := v.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRWconst {
			break
		}
		if x0_1.AuxInt != 8 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x2 := v.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRLconst {
			break
		}
		if x2_1.AuxInt != 8 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRLconst {
			break
		}
		if x1_1.AuxInt != 16 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRLconst {
			break
		}
		if x0_1.AuxInt != 24 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		w := v.Args[1]
		x6 := v.Args[2]
		if x6.Op != OpAMD64MOVBstore {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[2]
		if p != x6.Args[0] {
			break
		}
		x6_1 := x6.Args[1]
		if x6_1.Op != OpAMD64SHRQconst {
			break
		}
		if x6_1.AuxInt != 8 {
			break
		}
		if w != x6_1.Args[0] {
			break
		}
		x5 := x6.Args[2]
		if x5.Op != OpAMD64MOVBstore {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[2]
		if p != x5.Args[0] {
			break
		}
		x5_1 := x5.Args[1]
		if x5_1.Op != OpAMD64SHRQconst {
			break
		}
		if x5_1.AuxInt != 16 {
			break
		}
		if w != x5_1.Args[0] {
			break
		}
		x4 := x5.Args[2]
		if x4.Op != OpAMD64MOVBstore {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[2]
		if p != x4.Args[0] {
			break
		}
		x4_1 := x4.Args[1]
		if x4_1.Op != OpAMD64SHRQconst {
			break
		}
		if x4_1.AuxInt != 24 {
			break
		}
		if w != x4_1.Args[0] {
			break
		}
		x3 := x4.Args[2]
		if x3.Op != OpAMD64MOVBstore {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[2]
		if p != x3.Args[0] {
			break
		}
		x3_1 := x3.Args[1]
		if x3_1.Op != OpAMD64SHRQconst {
			break
		}
		if x3_1.AuxInt != 32 {
			break
		}
		if w != x3_1.Args[0] {
			break
		}
		x2 := x3.Args[2]
		if x2.Op != OpAMD64MOVBstore {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[2]
		if p != x2.Args[0] {
			break
		}
		x2_1 := x2.Args[1]
		if x2_1.Op != OpAMD64SHRQconst {
			break
		}
		if x2_1.AuxInt != 40 {
			break
		}
		if w != x2_1.Args[0] {
			break
		}
		x1 := x2.Args[2]
		if x1.Op != OpAMD64MOVBstore {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		x1_1 := x1.Args[1]
		if x1_1.Op != OpAMD64SHRQconst {
			break
		}
		if x1_1.AuxInt != 48 {
			break
		}
		if w != x1_1.Args[0] {
			break
		}
		x0 := x1.Args[2]
		if x0.Op != OpAMD64MOVBstore {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		x0_1 := x0.Args[1]
		if x0_1.Op != OpAMD64SHRQconst {
			break
		}
		if x0_1.AuxInt != 56 {
			break
		}
		if w != x0_1.Args[0] {
			break
		}
		mem := x0.Args[2]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
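// Illustration (hedged): the last three rules above recognize a descending run
// of byte stores of w, w>>8, w>>16, ... and replace it with one rotated or
// byte-swapped wide store. Code in the style of encoding/binary's big-endian
// helpers is the typical trigger:
//
//	func putBE32(b []byte, w uint32) {
//		b[3] = byte(w)
//		b[2] = byte(w >> 8)
//		b[1] = byte(w >> 16)
//		b[0] = byte(w >> 24) // the four MOVBstores become MOVLstore (BSWAPL w)
//	}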
func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 8 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstore [i-1] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstore {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVBstore {
			break
		}
		if mem2.AuxInt != i-1 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVBload {
			break
		}
		if x2.AuxInt != j-1 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = j - 1
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
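// Illustration (hedged): the third rule above merges two adjacent byte
// load/store pairs into a single 16-bit copy. A hypothetical fixed-size
// byte-wise copy that can take this shape:
//
//	func copy2(dst, src *[2]byte) {
//		dst[0] = src[0]
//		dst[1] = src[1] // both pairs may collapse into one MOVWload + MOVWstore
//	}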
func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
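// Worked instance (illustrative, not generated): the MOVWstoreconst merge
// above packs two adjacent constant byte stores little-endian. Storing 0x34
// at off and 0x12 at off+1 yields makeValAndOff(0x34&0xff | 0x12<<8, off),
// i.e. a single 16-bit store of 0x1234 at off, whose memory bytes are 0x34
// then 0x12, exactly as the two byte stores would have written them.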
func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVBstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVBstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
	// cond: x0.Uses == 1 && clobber(x0)
	// result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x0 := v.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-1 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRWconst {
			break
		}
		if x0_2.AuxInt != 8 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && clobber(x0)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
		v0.AuxInt = 8
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
	// result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x2 := v.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-1 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRLconst {
			break
		}
		if x2_2.AuxInt != 8 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-2 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRLconst {
			break
		}
		if x1_2.AuxInt != 16 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-3 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRLconst {
			break
		}
		if x0_2.AuxInt != 24 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 3
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
	// result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		w := v.Args[2]
		x6 := v.Args[3]
		if x6.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x6.AuxInt != i-1 {
			break
		}
		if x6.Aux != s {
			break
		}
		_ = x6.Args[3]
		if p != x6.Args[0] {
			break
		}
		if idx != x6.Args[1] {
			break
		}
		x6_2 := x6.Args[2]
		if x6_2.Op != OpAMD64SHRQconst {
			break
		}
		if x6_2.AuxInt != 8 {
			break
		}
		if w != x6_2.Args[0] {
			break
		}
		x5 := x6.Args[3]
		if x5.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x5.AuxInt != i-2 {
			break
		}
		if x5.Aux != s {
			break
		}
		_ = x5.Args[3]
		if p != x5.Args[0] {
			break
		}
		if idx != x5.Args[1] {
			break
		}
		x5_2 := x5.Args[2]
		if x5_2.Op != OpAMD64SHRQconst {
			break
		}
		if x5_2.AuxInt != 16 {
			break
		}
		if w != x5_2.Args[0] {
			break
		}
		x4 := x5.Args[3]
		if x4.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x4.AuxInt != i-3 {
			break
		}
		if x4.Aux != s {
			break
		}
		_ = x4.Args[3]
		if p != x4.Args[0] {
			break
		}
		if idx != x4.Args[1] {
			break
		}
		x4_2 := x4.Args[2]
		if x4_2.Op != OpAMD64SHRQconst {
			break
		}
		if x4_2.AuxInt != 24 {
			break
		}
		if w != x4_2.Args[0] {
			break
		}
		x3 := x4.Args[3]
		if x3.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x3.AuxInt != i-4 {
			break
		}
		if x3.Aux != s {
			break
		}
		_ = x3.Args[3]
		if p != x3.Args[0] {
			break
		}
		if idx != x3.Args[1] {
			break
		}
		x3_2 := x3.Args[2]
		if x3_2.Op != OpAMD64SHRQconst {
			break
		}
		if x3_2.AuxInt != 32 {
			break
		}
		if w != x3_2.Args[0] {
			break
		}
		x2 := x3.Args[3]
		if x2.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x2.AuxInt != i-5 {
			break
		}
		if x2.Aux != s {
			break
		}
		_ = x2.Args[3]
		if p != x2.Args[0] {
			break
		}
		if idx != x2.Args[1] {
			break
		}
		x2_2 := x2.Args[2]
		if x2_2.Op != OpAMD64SHRQconst {
			break
		}
		if x2_2.AuxInt != 40 {
			break
		}
		if w != x2_2.Args[0] {
			break
		}
		x1 := x2.Args[3]
		if x1.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x1.AuxInt != i-6 {
			break
		}
		if x1.Aux != s {
			break
		}
		_ = x1.Args[3]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		x1_2 := x1.Args[2]
		if x1_2.Op != OpAMD64SHRQconst {
			break
		}
		if x1_2.AuxInt != 48 {
			break
		}
		if w != x1_2.Args[0] {
			break
		}
		x0 := x1.Args[3]
		if x0.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x0.AuxInt != i-7 {
			break
		}
		if x0.Aux != s {
			break
		}
		_ = x0.Args[3]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		x0_2 := x0.Args[2]
		if x0_2.Op != OpAMD64SHRQconst {
			break
		}
		if x0_2.AuxInt != 56 {
			break
		}
		if w != x0_2.Args[0] {
			break
		}
		mem := x0.Args[3]
		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 7
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
		v0.AddArg(w)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 8 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVBstoreidx1 {
			break
		}
		if x.AuxInt != i-1 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-8 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = i - 1
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
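// Illustration (hedged): the MOVBstoreidx1 rules mirror the plain MOVBstore
// ones for pointer+index addressing. For example, a little-endian 16-bit
// write through an indexed address
//
//	func put16(b []byte, i int, v uint16) {
//		b[i] = byte(v)
//		b[i+1] = byte(v >> 8) // may coalesce into a single MOVWstoreidx1
//	}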
func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: c & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fffffff
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// cond:
	// result: (MOVLQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool {
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
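// Illustration (hedged): the nested-extension rules above keep only the
// narrowest sign extension, as in
//
//	func widen(x int16) int64 {
//		return int64(int32(x)) // (MOVLQSX (MOVWQSX x)) simplifies to (MOVWQSX x)
//	}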
func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x
	for {
		x := v.Args[0]
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLloadidx4 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// cond:
	// result: (ANDLconst [c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// cond:
	// result: (MOVLQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// cond:
	// result: (MOVWQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// cond:
	// result: (MOVBQZX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
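// Illustration (hedged): the zeroUpper32Bits rule above drops a zero
// extension when the 32-bit producer already cleared the upper half of the
// register, e.g.
//
//	func zext(x uint32) uint64 {
//		y := x + 1        // a 32-bit ADDL leaves bits 32-63 zero on amd64
//		return uint64(y)  // the MOVLQZX is rewritten to a plain copy of y
//	}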
func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLf2i <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLi2f <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
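// Illustration (hedged): MOVLf2i/MOVLi2f of a function argument are rewritten
// to re-materialize the Arg in the entry block with the other type, so the
// stack slot is simply read in the target register class. A hypothetical
// trigger:
//
//	func bits(f float32) uint32 {
//		return math.Float32bits(f) // the Arg is re-read as an integer; no cross-register move
//	}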
func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVLf2i val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	return false
}
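// Note: the ADDQconst and LEAQ foldings in MOVLload above fold a
// constant pointer adjustment into the load's displacement, e.g.
//	(MOVLload [4] {s} (ADDQconst [8] ptr) mem)
//	=> (MOVLload [12] {s} ptr mem)
// which is legal only while the summed offset still fits in a signed
// 32-bit displacement (the is32Bit condition); the MOVSSstore rule
// instead forwards freshly stored bits to the load as a MOVLf2i.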
func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
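// Note: in the MOVLloadidx1 rules above, an index that is itself a
// constant left shift folds into the hardware scale of the SIB
// addressing mode: a shift by 2 multiplies by 4 and a shift by 3 by 8,
// e.g.
//	(MOVLloadidx1 [c] {s} ptr (SHLQconst [2] i) mem)
//	=> (MOVLloadidx4 [c] {s} ptr i mem)
// Both operand orders are matched because the two address operands of
// the idx1 form are interchangeable.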
func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool {
	// match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
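// Note: the pairing rule just above recognizes a 64-bit value w written
// as two adjacent 32-bit halves (w at i-4, then w>>32 at i) and fuses
// them into a single MOVQstore at i-4. The fused store is equivalent
// only because AMD64 is little-endian, and the clobbered narrow store
// must have no other uses.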
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVLstore {
			break
		}
		if mem2.AuxInt != i-4 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVLload {
			break
		}
		if x2.AuxInt != j-4 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = j - 4
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
	// result: (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64ADDLconstmem)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
	// cond:
	// result: (MOVSSstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLf2i {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
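// Note: the ADDLconstmem rule above fuses a load, add-constant, store
// round trip to one address into a single read-modify-write operation,
// roughly what a Go statement like *p += c lowers to when the
// intermediate sum has no other uses; the MOVLf2i rule at the end picks
// the SSE store form when the value already lives in a floating-point
// register, avoiding a cross-register-file move.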
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
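// Note: a ValAndOff packs a 32-bit immediate and a 32-bit offset into a
// single AuxInt. The constant-store pairing rule above merges two
// adjacent 4-byte constant stores into one 8-byte store whose immediate
// is assembled as
//	ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
// so the lower-addressed constant becomes the low word, again relying
// on little-endian layout.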
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(4*c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(4 * c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(4 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx4 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 2
		v0.AddArg(i)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVLstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
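// Note: the second pairing rule above generalizes the w>>32 case to
// chains of shifted halves (w>>(j-32) stored just below w>>j), which is
// how stores of values wider than 64 bits decompose. In the idx4
// variants that follow, the fused 8-byte store falls back to the idx1
// form with an explicit (SHLQconst [2] idx) index, since the 4-byte
// scale no longer matches.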
func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool {
	// match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool {
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool {
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
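// Note: MOVOload/MOVOstore move a full 16-byte SSE operand; only the
// plain displacement foldings (ADDQconst, LEAQ) exist for them here, as
// this section defines no scaled-index variants of the 16-byte moves.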
func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool {
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVQf2i <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVQi2f <t> (Arg [off] {sym}))
	// cond:
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		off := v_0.AuxInt
		sym := v_0.Aux
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQf2i val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVQf2i)
		v.AddArg(val)
		return true
	}
	return false
}
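// Note: the first MOVQload rule above is store-to-load forwarding: a
// reload from an address that was just stored to is replaced by the
// stored value itself. The MOVSDstore/MOVQf2i rule handles the same
// situation when the bits were stored from a floating-point register,
// reinterpreting them instead of reloading.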
func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
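// Editorial sketch: the second MOVQloadidx8 rule above folds an ADDQconst on
// the *index* into the displacement by scaling the constant, relying on the
// identity base + c + 8*(idx+d) == base + (c+8*d) + 8*idx; is32Bit must then
// hold for the widened displacement c+8*d. A hypothetical check:
func idx8FoldSketch(base, idx, c, d int64) bool {
	return base+c+8*(idx+d) == base+(c+8*d)+8*idx // always true
}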
func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [i] {s} p x1:(MOVQload [j] {s2} p2 mem) mem2:(MOVQstore [i-8] {s} p x2:(MOVQload [j-8] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVOstore [i-8] {s} p (MOVOload [j-8] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVQload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVQstore {
			break
		}
		if mem2.AuxInt != i-8 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVQload {
			break
		}
		if x2.AuxInt != j-8 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = i - 8
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AuxInt = j - 8
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
	// result: (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ADDQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64ADDQconstmem)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool {
	// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
	// cond:
	// result: (MOVSDstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQf2i {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
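// Editorial sketch: the MOVQstore pair rule above fuses two adjacent 8-byte
// load/store pairs into a single 16-byte MOVOload/MOVOstore when SSE is
// usable and the intermediate values have no other uses. Byte-for-byte the
// two forms are equivalent, as this hypothetical helper illustrates:
func movoPairSketch(dst, src []byte) {
	copy(dst[0:8], src[0:8])   // MOVQload/MOVQstore at [i-8]
	copy(dst[8:16], src[8:16]) // MOVQload/MOVQstore at [i]
	// equivalent single move: copy(dst[0:16], src[0:16]) // MOVO
}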
func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
	// cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)
	// result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c2 := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = ValAndOff(c2).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
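// Editorial sketch: a ValAndOff packs a 32-bit constant and a 32-bit offset
// into one int64 AuxInt, which is why the rules above guard offset folding
// with canAdd rather than is32Bit. The packing below mirrors what
// makeValAndOff does; treat the exact layout as illustrative:
func valAndOffSketch(val, off int32) int64 {
	return int64(val)<<32 | int64(uint32(off)) // value in the high 32 bits
}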
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(8*c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(8 * c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool {
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	return false
}
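// Editorial sketch, mirroring movqF2ISketch above: the final MOVSDload rule
// turns a float64 load of a just-stored 64-bit integer into a MOVQi2f
// register-to-register reinterpretation:
func movqI2FSketch(bits uint64) float64 {
	return math.Float64frombits(bits) // the same bits MOVQi2f moves GP -> XMM
}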
func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
	// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool {
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// cond:
	// result: (MOVQstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
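// Editorial note: the final MOVSDstore rule above is the store-side twin of
// the MOVQload/MOVQf2i forwarding earlier in this file. If the float64 being
// stored was just reinterpreted from an integer register (MOVQi2f), the
// store is issued directly from the integer register as a MOVQstore and the
// conversion disappears.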
func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
	// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool {
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVLi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
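// Editorial sketch: the 32-bit analogue of movqI2FSketch above; the final
// MOVSSload rule replaces a float32 load of a just-stored 32-bit integer
// with a MOVLi2f bit reinterpretation:
func movlI2FSketch(bits uint32) float32 {
	return math.Float32frombits(bits) // the same bits MOVLi2f moves GP -> XMM
}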
func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool {
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
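// Editorial sketch: the MOVWQSX/ANDLconst rule above drops the sign
// extension when the mask leaves bit 15 clear, since the masked value can
// then never be negative as a 16-bit quantity. A hypothetical equivalence
// check (true whenever c&0x8000 == 0, the rule's condition):
func movwqsxSketch(x, c uint32) bool {
	if c&0x8000 != 0 {
		return false // rule does not apply
	}
	lhs := int64(int16(x & c))   // MOVWQSX (ANDLconst [c] x)
	rhs := int64(x & c & 0x7fff) // ANDLconst [c & 0x7fff] x
	return lhs == rhs
}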
func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool {
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
12456 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem) 12457 for { 12458 x := v.Args[0] 12459 if x.Op != OpAMD64MOVWloadidx1 { 12460 break 12461 } 12462 off := x.AuxInt 12463 sym := x.Aux 12464 _ = x.Args[2] 12465 ptr := x.Args[0] 12466 idx := x.Args[1] 12467 mem := x.Args[2] 12468 if !(x.Uses == 1 && clobber(x)) { 12469 break 12470 } 12471 b = x.Block 12472 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 12473 v.reset(OpCopy) 12474 v.AddArg(v0) 12475 v0.AuxInt = off 12476 v0.Aux = sym 12477 v0.AddArg(ptr) 12478 v0.AddArg(idx) 12479 v0.AddArg(mem) 12480 return true 12481 } 12482 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) 12483 // cond: x.Uses == 1 && clobber(x) 12484 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem) 12485 for { 12486 x := v.Args[0] 12487 if x.Op != OpAMD64MOVWloadidx2 { 12488 break 12489 } 12490 off := x.AuxInt 12491 sym := x.Aux 12492 _ = x.Args[2] 12493 ptr := x.Args[0] 12494 idx := x.Args[1] 12495 mem := x.Args[2] 12496 if !(x.Uses == 1 && clobber(x)) { 12497 break 12498 } 12499 b = x.Block 12500 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) 12501 v.reset(OpCopy) 12502 v.AddArg(v0) 12503 v0.AuxInt = off 12504 v0.Aux = sym 12505 v0.AddArg(ptr) 12506 v0.AddArg(idx) 12507 v0.AddArg(mem) 12508 return true 12509 } 12510 // match: (MOVWQZX (ANDLconst [c] x)) 12511 // cond: 12512 // result: (ANDLconst [c & 0xffff] x) 12513 for { 12514 v_0 := v.Args[0] 12515 if v_0.Op != OpAMD64ANDLconst { 12516 break 12517 } 12518 c := v_0.AuxInt 12519 x := v_0.Args[0] 12520 v.reset(OpAMD64ANDLconst) 12521 v.AuxInt = c & 0xffff 12522 v.AddArg(x) 12523 return true 12524 } 12525 // match: (MOVWQZX (MOVWQZX x)) 12526 // cond: 12527 // result: (MOVWQZX x) 12528 for { 12529 v_0 := v.Args[0] 12530 if v_0.Op != OpAMD64MOVWQZX { 12531 break 12532 } 12533 x := v_0.Args[0] 12534 v.reset(OpAMD64MOVWQZX) 12535 v.AddArg(x) 12536 return true 12537 } 12538 // match: (MOVWQZX (MOVBQZX x)) 12539 // cond: 12540 // result: (MOVBQZX x) 12541 for { 12542 v_0 := v.Args[0] 12543 if v_0.Op != OpAMD64MOVBQZX { 12544 break 12545 } 12546 x := v_0.Args[0] 12547 v.reset(OpAMD64MOVBQZX) 12548 v.AddArg(x) 12549 return true 12550 } 12551 return false 12552 } 12553 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 12554 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 12555 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12556 // result: (MOVWQZX x) 12557 for { 12558 off := v.AuxInt 12559 sym := v.Aux 12560 _ = v.Args[1] 12561 ptr := v.Args[0] 12562 v_1 := v.Args[1] 12563 if v_1.Op != OpAMD64MOVWstore { 12564 break 12565 } 12566 off2 := v_1.AuxInt 12567 sym2 := v_1.Aux 12568 _ = v_1.Args[2] 12569 ptr2 := v_1.Args[0] 12570 x := v_1.Args[1] 12571 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12572 break 12573 } 12574 v.reset(OpAMD64MOVWQZX) 12575 v.AddArg(x) 12576 return true 12577 } 12578 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 12579 // cond: is32Bit(off1+off2) 12580 // result: (MOVWload [off1+off2] {sym} ptr mem) 12581 for { 12582 off1 := v.AuxInt 12583 sym := v.Aux 12584 _ = v.Args[1] 12585 v_0 := v.Args[0] 12586 if v_0.Op != OpAMD64ADDQconst { 12587 break 12588 } 12589 off2 := v_0.AuxInt 12590 ptr := v_0.Args[0] 12591 mem := v.Args[1] 12592 if !(is32Bit(off1 + off2)) { 12593 break 12594 } 12595 v.reset(OpAMD64MOVWload) 12596 v.AuxInt = off1 + off2 12597 v.Aux = sym 12598 v.AddArg(ptr) 12599 v.AddArg(mem) 12600 return true 12601 } 12602 // match: (MOVWload [off1] {sym1} (LEAQ 
[off2] {sym2} base) mem) 12603 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12604 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12605 for { 12606 off1 := v.AuxInt 12607 sym1 := v.Aux 12608 _ = v.Args[1] 12609 v_0 := v.Args[0] 12610 if v_0.Op != OpAMD64LEAQ { 12611 break 12612 } 12613 off2 := v_0.AuxInt 12614 sym2 := v_0.Aux 12615 base := v_0.Args[0] 12616 mem := v.Args[1] 12617 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12618 break 12619 } 12620 v.reset(OpAMD64MOVWload) 12621 v.AuxInt = off1 + off2 12622 v.Aux = mergeSym(sym1, sym2) 12623 v.AddArg(base) 12624 v.AddArg(mem) 12625 return true 12626 } 12627 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 12628 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12629 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 12630 for { 12631 off1 := v.AuxInt 12632 sym1 := v.Aux 12633 _ = v.Args[1] 12634 v_0 := v.Args[0] 12635 if v_0.Op != OpAMD64LEAQ1 { 12636 break 12637 } 12638 off2 := v_0.AuxInt 12639 sym2 := v_0.Aux 12640 _ = v_0.Args[1] 12641 ptr := v_0.Args[0] 12642 idx := v_0.Args[1] 12643 mem := v.Args[1] 12644 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12645 break 12646 } 12647 v.reset(OpAMD64MOVWloadidx1) 12648 v.AuxInt = off1 + off2 12649 v.Aux = mergeSym(sym1, sym2) 12650 v.AddArg(ptr) 12651 v.AddArg(idx) 12652 v.AddArg(mem) 12653 return true 12654 } 12655 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) 12656 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12657 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 12658 for { 12659 off1 := v.AuxInt 12660 sym1 := v.Aux 12661 _ = v.Args[1] 12662 v_0 := v.Args[0] 12663 if v_0.Op != OpAMD64LEAQ2 { 12664 break 12665 } 12666 off2 := v_0.AuxInt 12667 sym2 := v_0.Aux 12668 _ = v_0.Args[1] 12669 ptr := v_0.Args[0] 12670 idx := v_0.Args[1] 12671 mem := v.Args[1] 12672 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12673 break 12674 } 12675 v.reset(OpAMD64MOVWloadidx2) 12676 v.AuxInt = off1 + off2 12677 v.Aux = mergeSym(sym1, sym2) 12678 v.AddArg(ptr) 12679 v.AddArg(idx) 12680 v.AddArg(mem) 12681 return true 12682 } 12683 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) 12684 // cond: ptr.Op != OpSB 12685 // result: (MOVWloadidx1 [off] {sym} ptr idx mem) 12686 for { 12687 off := v.AuxInt 12688 sym := v.Aux 12689 _ = v.Args[1] 12690 v_0 := v.Args[0] 12691 if v_0.Op != OpAMD64ADDQ { 12692 break 12693 } 12694 _ = v_0.Args[1] 12695 ptr := v_0.Args[0] 12696 idx := v_0.Args[1] 12697 mem := v.Args[1] 12698 if !(ptr.Op != OpSB) { 12699 break 12700 } 12701 v.reset(OpAMD64MOVWloadidx1) 12702 v.AuxInt = off 12703 v.Aux = sym 12704 v.AddArg(ptr) 12705 v.AddArg(idx) 12706 v.AddArg(mem) 12707 return true 12708 } 12709 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 12710 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 12711 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12712 for { 12713 off1 := v.AuxInt 12714 sym1 := v.Aux 12715 _ = v.Args[1] 12716 v_0 := v.Args[0] 12717 if v_0.Op != OpAMD64LEAL { 12718 break 12719 } 12720 off2 := v_0.AuxInt 12721 sym2 := v_0.Aux 12722 base := v_0.Args[0] 12723 mem := v.Args[1] 12724 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 12725 break 12726 } 12727 v.reset(OpAMD64MOVWload) 12728 v.AuxInt = off1 + off2 12729 v.Aux = mergeSym(sym1, sym2) 12730 v.AddArg(base) 12731 v.AddArg(mem) 12732 return true 12733 } 12734 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) 12735 
// cond: is32Bit(off1+off2) 12736 // result: (MOVWload [off1+off2] {sym} ptr mem) 12737 for { 12738 off1 := v.AuxInt 12739 sym := v.Aux 12740 _ = v.Args[1] 12741 v_0 := v.Args[0] 12742 if v_0.Op != OpAMD64ADDLconst { 12743 break 12744 } 12745 off2 := v_0.AuxInt 12746 ptr := v_0.Args[0] 12747 mem := v.Args[1] 12748 if !(is32Bit(off1 + off2)) { 12749 break 12750 } 12751 v.reset(OpAMD64MOVWload) 12752 v.AuxInt = off1 + off2 12753 v.Aux = sym 12754 v.AddArg(ptr) 12755 v.AddArg(mem) 12756 return true 12757 } 12758 return false 12759 } 12760 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { 12761 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 12762 // cond: 12763 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 12764 for { 12765 c := v.AuxInt 12766 sym := v.Aux 12767 _ = v.Args[2] 12768 ptr := v.Args[0] 12769 v_1 := v.Args[1] 12770 if v_1.Op != OpAMD64SHLQconst { 12771 break 12772 } 12773 if v_1.AuxInt != 1 { 12774 break 12775 } 12776 idx := v_1.Args[0] 12777 mem := v.Args[2] 12778 v.reset(OpAMD64MOVWloadidx2) 12779 v.AuxInt = c 12780 v.Aux = sym 12781 v.AddArg(ptr) 12782 v.AddArg(idx) 12783 v.AddArg(mem) 12784 return true 12785 } 12786 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) 12787 // cond: 12788 // result: (MOVWloadidx2 [c] {sym} ptr idx mem) 12789 for { 12790 c := v.AuxInt 12791 sym := v.Aux 12792 _ = v.Args[2] 12793 v_0 := v.Args[0] 12794 if v_0.Op != OpAMD64SHLQconst { 12795 break 12796 } 12797 if v_0.AuxInt != 1 { 12798 break 12799 } 12800 idx := v_0.Args[0] 12801 ptr := v.Args[1] 12802 mem := v.Args[2] 12803 v.reset(OpAMD64MOVWloadidx2) 12804 v.AuxInt = c 12805 v.Aux = sym 12806 v.AddArg(ptr) 12807 v.AddArg(idx) 12808 v.AddArg(mem) 12809 return true 12810 } 12811 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 12812 // cond: is32Bit(c+d) 12813 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12814 for { 12815 c := v.AuxInt 12816 sym := v.Aux 12817 _ = v.Args[2] 12818 v_0 := v.Args[0] 12819 if v_0.Op != OpAMD64ADDQconst { 12820 break 12821 } 12822 d := v_0.AuxInt 12823 ptr := v_0.Args[0] 12824 idx := v.Args[1] 12825 mem := v.Args[2] 12826 if !(is32Bit(c + d)) { 12827 break 12828 } 12829 v.reset(OpAMD64MOVWloadidx1) 12830 v.AuxInt = c + d 12831 v.Aux = sym 12832 v.AddArg(ptr) 12833 v.AddArg(idx) 12834 v.AddArg(mem) 12835 return true 12836 } 12837 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 12838 // cond: is32Bit(c+d) 12839 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12840 for { 12841 c := v.AuxInt 12842 sym := v.Aux 12843 _ = v.Args[2] 12844 idx := v.Args[0] 12845 v_1 := v.Args[1] 12846 if v_1.Op != OpAMD64ADDQconst { 12847 break 12848 } 12849 d := v_1.AuxInt 12850 ptr := v_1.Args[0] 12851 mem := v.Args[2] 12852 if !(is32Bit(c + d)) { 12853 break 12854 } 12855 v.reset(OpAMD64MOVWloadidx1) 12856 v.AuxInt = c + d 12857 v.Aux = sym 12858 v.AddArg(ptr) 12859 v.AddArg(idx) 12860 v.AddArg(mem) 12861 return true 12862 } 12863 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 12864 // cond: is32Bit(c+d) 12865 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12866 for { 12867 c := v.AuxInt 12868 sym := v.Aux 12869 _ = v.Args[2] 12870 ptr := v.Args[0] 12871 v_1 := v.Args[1] 12872 if v_1.Op != OpAMD64ADDQconst { 12873 break 12874 } 12875 d := v_1.AuxInt 12876 idx := v_1.Args[0] 12877 mem := v.Args[2] 12878 if !(is32Bit(c + d)) { 12879 break 12880 } 12881 v.reset(OpAMD64MOVWloadidx1) 12882 v.AuxInt = c + d 12883 v.Aux = sym 12884 v.AddArg(ptr) 12885 v.AddArg(idx) 12886 v.AddArg(mem) 12887 return true 
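// MOVWloadidx1 addressing is commutative in ptr and idx, so the generator
// emits each ADDQconst displacement fold once per argument order; every
// variant checks is32Bit(c+d) so the merged offset still fits in a signed
// 32-bit displacement field.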
12888 } 12889 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 12890 // cond: is32Bit(c+d) 12891 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) 12892 for { 12893 c := v.AuxInt 12894 sym := v.Aux 12895 _ = v.Args[2] 12896 v_0 := v.Args[0] 12897 if v_0.Op != OpAMD64ADDQconst { 12898 break 12899 } 12900 d := v_0.AuxInt 12901 idx := v_0.Args[0] 12902 ptr := v.Args[1] 12903 mem := v.Args[2] 12904 if !(is32Bit(c + d)) { 12905 break 12906 } 12907 v.reset(OpAMD64MOVWloadidx1) 12908 v.AuxInt = c + d 12909 v.Aux = sym 12910 v.AddArg(ptr) 12911 v.AddArg(idx) 12912 v.AddArg(mem) 12913 return true 12914 } 12915 return false 12916 } 12917 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { 12918 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) 12919 // cond: is32Bit(c+d) 12920 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) 12921 for { 12922 c := v.AuxInt 12923 sym := v.Aux 12924 _ = v.Args[2] 12925 v_0 := v.Args[0] 12926 if v_0.Op != OpAMD64ADDQconst { 12927 break 12928 } 12929 d := v_0.AuxInt 12930 ptr := v_0.Args[0] 12931 idx := v.Args[1] 12932 mem := v.Args[2] 12933 if !(is32Bit(c + d)) { 12934 break 12935 } 12936 v.reset(OpAMD64MOVWloadidx2) 12937 v.AuxInt = c + d 12938 v.Aux = sym 12939 v.AddArg(ptr) 12940 v.AddArg(idx) 12941 v.AddArg(mem) 12942 return true 12943 } 12944 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) 12945 // cond: is32Bit(c+2*d) 12946 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) 12947 for { 12948 c := v.AuxInt 12949 sym := v.Aux 12950 _ = v.Args[2] 12951 ptr := v.Args[0] 12952 v_1 := v.Args[1] 12953 if v_1.Op != OpAMD64ADDQconst { 12954 break 12955 } 12956 d := v_1.AuxInt 12957 idx := v_1.Args[0] 12958 mem := v.Args[2] 12959 if !(is32Bit(c + 2*d)) { 12960 break 12961 } 12962 v.reset(OpAMD64MOVWloadidx2) 12963 v.AuxInt = c + 2*d 12964 v.Aux = sym 12965 v.AddArg(ptr) 12966 v.AddArg(idx) 12967 v.AddArg(mem) 12968 return true 12969 } 12970 return false 12971 } 12972 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { 12973 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) 12974 // cond: 12975 // result: (MOVWstore [off] {sym} ptr x mem) 12976 for { 12977 off := v.AuxInt 12978 sym := v.Aux 12979 _ = v.Args[2] 12980 ptr := v.Args[0] 12981 v_1 := v.Args[1] 12982 if v_1.Op != OpAMD64MOVWQSX { 12983 break 12984 } 12985 x := v_1.Args[0] 12986 mem := v.Args[2] 12987 v.reset(OpAMD64MOVWstore) 12988 v.AuxInt = off 12989 v.Aux = sym 12990 v.AddArg(ptr) 12991 v.AddArg(x) 12992 v.AddArg(mem) 12993 return true 12994 } 12995 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) 12996 // cond: 12997 // result: (MOVWstore [off] {sym} ptr x mem) 12998 for { 12999 off := v.AuxInt 13000 sym := v.Aux 13001 _ = v.Args[2] 13002 ptr := v.Args[0] 13003 v_1 := v.Args[1] 13004 if v_1.Op != OpAMD64MOVWQZX { 13005 break 13006 } 13007 x := v_1.Args[0] 13008 mem := v.Args[2] 13009 v.reset(OpAMD64MOVWstore) 13010 v.AuxInt = off 13011 v.Aux = sym 13012 v.AddArg(ptr) 13013 v.AddArg(x) 13014 v.AddArg(mem) 13015 return true 13016 } 13017 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 13018 // cond: is32Bit(off1+off2) 13019 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 13020 for { 13021 off1 := v.AuxInt 13022 sym := v.Aux 13023 _ = v.Args[2] 13024 v_0 := v.Args[0] 13025 if v_0.Op != OpAMD64ADDQconst { 13026 break 13027 } 13028 off2 := v_0.AuxInt 13029 ptr := v_0.Args[0] 13030 val := v.Args[1] 13031 mem := v.Args[2] 13032 if !(is32Bit(off1 + off2)) { 13033 break 13034 } 13035 v.reset(OpAMD64MOVWstore) 13036 v.AuxInt = 
off1 + off2 13037 v.Aux = sym 13038 v.AddArg(ptr) 13039 v.AddArg(val) 13040 v.AddArg(mem) 13041 return true 13042 } 13043 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) 13044 // cond: validOff(off) 13045 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) 13046 for { 13047 off := v.AuxInt 13048 sym := v.Aux 13049 _ = v.Args[2] 13050 ptr := v.Args[0] 13051 v_1 := v.Args[1] 13052 if v_1.Op != OpAMD64MOVLconst { 13053 break 13054 } 13055 c := v_1.AuxInt 13056 mem := v.Args[2] 13057 if !(validOff(off)) { 13058 break 13059 } 13060 v.reset(OpAMD64MOVWstoreconst) 13061 v.AuxInt = makeValAndOff(int64(int16(c)), off) 13062 v.Aux = sym 13063 v.AddArg(ptr) 13064 v.AddArg(mem) 13065 return true 13066 } 13067 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 13068 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13069 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 13070 for { 13071 off1 := v.AuxInt 13072 sym1 := v.Aux 13073 _ = v.Args[2] 13074 v_0 := v.Args[0] 13075 if v_0.Op != OpAMD64LEAQ { 13076 break 13077 } 13078 off2 := v_0.AuxInt 13079 sym2 := v_0.Aux 13080 base := v_0.Args[0] 13081 val := v.Args[1] 13082 mem := v.Args[2] 13083 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13084 break 13085 } 13086 v.reset(OpAMD64MOVWstore) 13087 v.AuxInt = off1 + off2 13088 v.Aux = mergeSym(sym1, sym2) 13089 v.AddArg(base) 13090 v.AddArg(val) 13091 v.AddArg(mem) 13092 return true 13093 } 13094 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 13095 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13096 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 13097 for { 13098 off1 := v.AuxInt 13099 sym1 := v.Aux 13100 _ = v.Args[2] 13101 v_0 := v.Args[0] 13102 if v_0.Op != OpAMD64LEAQ1 { 13103 break 13104 } 13105 off2 := v_0.AuxInt 13106 sym2 := v_0.Aux 13107 _ = v_0.Args[1] 13108 ptr := v_0.Args[0] 13109 idx := v_0.Args[1] 13110 val := v.Args[1] 13111 mem := v.Args[2] 13112 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13113 break 13114 } 13115 v.reset(OpAMD64MOVWstoreidx1) 13116 v.AuxInt = off1 + off2 13117 v.Aux = mergeSym(sym1, sym2) 13118 v.AddArg(ptr) 13119 v.AddArg(idx) 13120 v.AddArg(val) 13121 v.AddArg(mem) 13122 return true 13123 } 13124 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) 13125 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 13126 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 13127 for { 13128 off1 := v.AuxInt 13129 sym1 := v.Aux 13130 _ = v.Args[2] 13131 v_0 := v.Args[0] 13132 if v_0.Op != OpAMD64LEAQ2 { 13133 break 13134 } 13135 off2 := v_0.AuxInt 13136 sym2 := v_0.Aux 13137 _ = v_0.Args[1] 13138 ptr := v_0.Args[0] 13139 idx := v_0.Args[1] 13140 val := v.Args[1] 13141 mem := v.Args[2] 13142 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 13143 break 13144 } 13145 v.reset(OpAMD64MOVWstoreidx2) 13146 v.AuxInt = off1 + off2 13147 v.Aux = mergeSym(sym1, sym2) 13148 v.AddArg(ptr) 13149 v.AddArg(idx) 13150 v.AddArg(val) 13151 v.AddArg(mem) 13152 return true 13153 } 13154 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) 13155 // cond: ptr.Op != OpSB 13156 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) 13157 for { 13158 off := v.AuxInt 13159 sym := v.Aux 13160 _ = v.Args[2] 13161 v_0 := v.Args[0] 13162 if v_0.Op != OpAMD64ADDQ { 13163 break 13164 } 13165 _ = v_0.Args[1] 13166 ptr := v_0.Args[0] 13167 idx := v_0.Args[1] 13168 val := v.Args[1] 13169 mem := 
v.Args[2] 13170 if !(ptr.Op != OpSB) { 13171 break 13172 } 13173 v.reset(OpAMD64MOVWstoreidx1) 13174 v.AuxInt = off 13175 v.Aux = sym 13176 v.AddArg(ptr) 13177 v.AddArg(idx) 13178 v.AddArg(val) 13179 v.AddArg(mem) 13180 return true 13181 } 13182 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) 13183 // cond: x.Uses == 1 && clobber(x) 13184 // result: (MOVLstore [i-2] {s} p w mem) 13185 for { 13186 i := v.AuxInt 13187 s := v.Aux 13188 _ = v.Args[2] 13189 p := v.Args[0] 13190 v_1 := v.Args[1] 13191 if v_1.Op != OpAMD64SHRQconst { 13192 break 13193 } 13194 if v_1.AuxInt != 16 { 13195 break 13196 } 13197 w := v_1.Args[0] 13198 x := v.Args[2] 13199 if x.Op != OpAMD64MOVWstore { 13200 break 13201 } 13202 if x.AuxInt != i-2 { 13203 break 13204 } 13205 if x.Aux != s { 13206 break 13207 } 13208 _ = x.Args[2] 13209 if p != x.Args[0] { 13210 break 13211 } 13212 if w != x.Args[1] { 13213 break 13214 } 13215 mem := x.Args[2] 13216 if !(x.Uses == 1 && clobber(x)) { 13217 break 13218 } 13219 v.reset(OpAMD64MOVLstore) 13220 v.AuxInt = i - 2 13221 v.Aux = s 13222 v.AddArg(p) 13223 v.AddArg(w) 13224 v.AddArg(mem) 13225 return true 13226 } 13227 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) 13228 // cond: x.Uses == 1 && clobber(x) 13229 // result: (MOVLstore [i-2] {s} p w0 mem) 13230 for { 13231 i := v.AuxInt 13232 s := v.Aux 13233 _ = v.Args[2] 13234 p := v.Args[0] 13235 v_1 := v.Args[1] 13236 if v_1.Op != OpAMD64SHRQconst { 13237 break 13238 } 13239 j := v_1.AuxInt 13240 w := v_1.Args[0] 13241 x := v.Args[2] 13242 if x.Op != OpAMD64MOVWstore { 13243 break 13244 } 13245 if x.AuxInt != i-2 { 13246 break 13247 } 13248 if x.Aux != s { 13249 break 13250 } 13251 _ = x.Args[2] 13252 if p != x.Args[0] { 13253 break 13254 } 13255 w0 := x.Args[1] 13256 if w0.Op != OpAMD64SHRQconst { 13257 break 13258 } 13259 if w0.AuxInt != j-16 { 13260 break 13261 } 13262 if w != w0.Args[0] { 13263 break 13264 } 13265 mem := x.Args[2] 13266 if !(x.Uses == 1 && clobber(x)) { 13267 break 13268 } 13269 v.reset(OpAMD64MOVLstore) 13270 v.AuxInt = i - 2 13271 v.Aux = s 13272 v.AddArg(p) 13273 v.AddArg(w0) 13274 v.AddArg(mem) 13275 return true 13276 } 13277 return false 13278 } 13279 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { 13280 b := v.Block 13281 _ = b 13282 typ := &b.Func.Config.Types 13283 _ = typ 13284 // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem)) 13285 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 13286 // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) 13287 for { 13288 i := v.AuxInt 13289 s := v.Aux 13290 _ = v.Args[2] 13291 p := v.Args[0] 13292 x1 := v.Args[1] 13293 if x1.Op != OpAMD64MOVWload { 13294 break 13295 } 13296 j := x1.AuxInt 13297 s2 := x1.Aux 13298 _ = x1.Args[1] 13299 p2 := x1.Args[0] 13300 mem := x1.Args[1] 13301 mem2 := v.Args[2] 13302 if mem2.Op != OpAMD64MOVWstore { 13303 break 13304 } 13305 if mem2.AuxInt != i-2 { 13306 break 13307 } 13308 if mem2.Aux != s { 13309 break 13310 } 13311 _ = mem2.Args[2] 13312 if p != mem2.Args[0] { 13313 break 13314 } 13315 x2 := mem2.Args[1] 13316 if x2.Op != OpAMD64MOVWload { 13317 break 13318 } 13319 if x2.AuxInt != j-2 { 13320 break 13321 } 13322 if x2.Aux != s2 { 13323 break 13324 } 13325 _ = x2.Args[1] 13326 if p2 != x2.Args[0] { 13327 break 13328 } 13329 if mem != x2.Args[1] { 13330 break 13331 } 13332 if mem != 
mem2.Args[2] { 13333 break 13334 } 13335 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 13336 break 13337 } 13338 v.reset(OpAMD64MOVLstore) 13339 v.AuxInt = i - 2 13340 v.Aux = s 13341 v.AddArg(p) 13342 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 13343 v0.AuxInt = j - 2 13344 v0.Aux = s2 13345 v0.AddArg(p2) 13346 v0.AddArg(mem) 13347 v.AddArg(v0) 13348 v.AddArg(mem) 13349 return true 13350 } 13351 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 13352 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 13353 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 13354 for { 13355 off1 := v.AuxInt 13356 sym1 := v.Aux 13357 _ = v.Args[2] 13358 v_0 := v.Args[0] 13359 if v_0.Op != OpAMD64LEAL { 13360 break 13361 } 13362 off2 := v_0.AuxInt 13363 sym2 := v_0.Aux 13364 base := v_0.Args[0] 13365 val := v.Args[1] 13366 mem := v.Args[2] 13367 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 13368 break 13369 } 13370 v.reset(OpAMD64MOVWstore) 13371 v.AuxInt = off1 + off2 13372 v.Aux = mergeSym(sym1, sym2) 13373 v.AddArg(base) 13374 v.AddArg(val) 13375 v.AddArg(mem) 13376 return true 13377 } 13378 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 13379 // cond: is32Bit(off1+off2) 13380 // result: (MOVWstore [off1+off2] {sym} ptr val mem) 13381 for { 13382 off1 := v.AuxInt 13383 sym := v.Aux 13384 _ = v.Args[2] 13385 v_0 := v.Args[0] 13386 if v_0.Op != OpAMD64ADDLconst { 13387 break 13388 } 13389 off2 := v_0.AuxInt 13390 ptr := v_0.Args[0] 13391 val := v.Args[1] 13392 mem := v.Args[2] 13393 if !(is32Bit(off1 + off2)) { 13394 break 13395 } 13396 v.reset(OpAMD64MOVWstore) 13397 v.AuxInt = off1 + off2 13398 v.Aux = sym 13399 v.AddArg(ptr) 13400 v.AddArg(val) 13401 v.AddArg(mem) 13402 return true 13403 } 13404 return false 13405 } 13406 func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { 13407 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 13408 // cond: ValAndOff(sc).canAdd(off) 13409 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 13410 for { 13411 sc := v.AuxInt 13412 s := v.Aux 13413 _ = v.Args[1] 13414 v_0 := v.Args[0] 13415 if v_0.Op != OpAMD64ADDQconst { 13416 break 13417 } 13418 off := v_0.AuxInt 13419 ptr := v_0.Args[0] 13420 mem := v.Args[1] 13421 if !(ValAndOff(sc).canAdd(off)) { 13422 break 13423 } 13424 v.reset(OpAMD64MOVWstoreconst) 13425 v.AuxInt = ValAndOff(sc).add(off) 13426 v.Aux = s 13427 v.AddArg(ptr) 13428 v.AddArg(mem) 13429 return true 13430 } 13431 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 13432 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13433 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13434 for { 13435 sc := v.AuxInt 13436 sym1 := v.Aux 13437 _ = v.Args[1] 13438 v_0 := v.Args[0] 13439 if v_0.Op != OpAMD64LEAQ { 13440 break 13441 } 13442 off := v_0.AuxInt 13443 sym2 := v_0.Aux 13444 ptr := v_0.Args[0] 13445 mem := v.Args[1] 13446 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13447 break 13448 } 13449 v.reset(OpAMD64MOVWstoreconst) 13450 v.AuxInt = ValAndOff(sc).add(off) 13451 v.Aux = mergeSym(sym1, sym2) 13452 v.AddArg(ptr) 13453 v.AddArg(mem) 13454 return true 13455 } 13456 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 13457 // cond: canMergeSym(sym1, sym2) 13458 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13459 for { 13460 x := v.AuxInt 13461 sym1 
:= v.Aux 13462 _ = v.Args[1] 13463 v_0 := v.Args[0] 13464 if v_0.Op != OpAMD64LEAQ1 { 13465 break 13466 } 13467 off := v_0.AuxInt 13468 sym2 := v_0.Aux 13469 _ = v_0.Args[1] 13470 ptr := v_0.Args[0] 13471 idx := v_0.Args[1] 13472 mem := v.Args[1] 13473 if !(canMergeSym(sym1, sym2)) { 13474 break 13475 } 13476 v.reset(OpAMD64MOVWstoreconstidx1) 13477 v.AuxInt = ValAndOff(x).add(off) 13478 v.Aux = mergeSym(sym1, sym2) 13479 v.AddArg(ptr) 13480 v.AddArg(idx) 13481 v.AddArg(mem) 13482 return true 13483 } 13484 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) 13485 // cond: canMergeSym(sym1, sym2) 13486 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 13487 for { 13488 x := v.AuxInt 13489 sym1 := v.Aux 13490 _ = v.Args[1] 13491 v_0 := v.Args[0] 13492 if v_0.Op != OpAMD64LEAQ2 { 13493 break 13494 } 13495 off := v_0.AuxInt 13496 sym2 := v_0.Aux 13497 _ = v_0.Args[1] 13498 ptr := v_0.Args[0] 13499 idx := v_0.Args[1] 13500 mem := v.Args[1] 13501 if !(canMergeSym(sym1, sym2)) { 13502 break 13503 } 13504 v.reset(OpAMD64MOVWstoreconstidx2) 13505 v.AuxInt = ValAndOff(x).add(off) 13506 v.Aux = mergeSym(sym1, sym2) 13507 v.AddArg(ptr) 13508 v.AddArg(idx) 13509 v.AddArg(mem) 13510 return true 13511 } 13512 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) 13513 // cond: 13514 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) 13515 for { 13516 x := v.AuxInt 13517 sym := v.Aux 13518 _ = v.Args[1] 13519 v_0 := v.Args[0] 13520 if v_0.Op != OpAMD64ADDQ { 13521 break 13522 } 13523 _ = v_0.Args[1] 13524 ptr := v_0.Args[0] 13525 idx := v_0.Args[1] 13526 mem := v.Args[1] 13527 v.reset(OpAMD64MOVWstoreconstidx1) 13528 v.AuxInt = x 13529 v.Aux = sym 13530 v.AddArg(ptr) 13531 v.AddArg(idx) 13532 v.AddArg(mem) 13533 return true 13534 } 13535 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) 13536 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13537 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) 13538 for { 13539 c := v.AuxInt 13540 s := v.Aux 13541 _ = v.Args[1] 13542 p := v.Args[0] 13543 x := v.Args[1] 13544 if x.Op != OpAMD64MOVWstoreconst { 13545 break 13546 } 13547 a := x.AuxInt 13548 if x.Aux != s { 13549 break 13550 } 13551 _ = x.Args[1] 13552 if p != x.Args[0] { 13553 break 13554 } 13555 mem := x.Args[1] 13556 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13557 break 13558 } 13559 v.reset(OpAMD64MOVLstoreconst) 13560 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13561 v.Aux = s 13562 v.AddArg(p) 13563 v.AddArg(mem) 13564 return true 13565 } 13566 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 13567 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 13568 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 13569 for { 13570 sc := v.AuxInt 13571 sym1 := v.Aux 13572 _ = v.Args[1] 13573 v_0 := v.Args[0] 13574 if v_0.Op != OpAMD64LEAL { 13575 break 13576 } 13577 off := v_0.AuxInt 13578 sym2 := v_0.Aux 13579 ptr := v_0.Args[0] 13580 mem := v.Args[1] 13581 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 13582 break 13583 } 13584 v.reset(OpAMD64MOVWstoreconst) 13585 v.AuxInt = ValAndOff(sc).add(off) 13586 v.Aux = mergeSym(sym1, sym2) 13587 v.AddArg(ptr) 13588 v.AddArg(mem) 13589 return true 13590 } 13591 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] 
ptr) mem) 13592 // cond: ValAndOff(sc).canAdd(off) 13593 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 13594 for { 13595 sc := v.AuxInt 13596 s := v.Aux 13597 _ = v.Args[1] 13598 v_0 := v.Args[0] 13599 if v_0.Op != OpAMD64ADDLconst { 13600 break 13601 } 13602 off := v_0.AuxInt 13603 ptr := v_0.Args[0] 13604 mem := v.Args[1] 13605 if !(ValAndOff(sc).canAdd(off)) { 13606 break 13607 } 13608 v.reset(OpAMD64MOVWstoreconst) 13609 v.AuxInt = ValAndOff(sc).add(off) 13610 v.Aux = s 13611 v.AddArg(ptr) 13612 v.AddArg(mem) 13613 return true 13614 } 13615 return false 13616 } 13617 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { 13618 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) 13619 // cond: 13620 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) 13621 for { 13622 c := v.AuxInt 13623 sym := v.Aux 13624 _ = v.Args[2] 13625 ptr := v.Args[0] 13626 v_1 := v.Args[1] 13627 if v_1.Op != OpAMD64SHLQconst { 13628 break 13629 } 13630 if v_1.AuxInt != 1 { 13631 break 13632 } 13633 idx := v_1.Args[0] 13634 mem := v.Args[2] 13635 v.reset(OpAMD64MOVWstoreconstidx2) 13636 v.AuxInt = c 13637 v.Aux = sym 13638 v.AddArg(ptr) 13639 v.AddArg(idx) 13640 v.AddArg(mem) 13641 return true 13642 } 13643 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 13644 // cond: ValAndOff(x).canAdd(c) 13645 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13646 for { 13647 x := v.AuxInt 13648 sym := v.Aux 13649 _ = v.Args[2] 13650 v_0 := v.Args[0] 13651 if v_0.Op != OpAMD64ADDQconst { 13652 break 13653 } 13654 c := v_0.AuxInt 13655 ptr := v_0.Args[0] 13656 idx := v.Args[1] 13657 mem := v.Args[2] 13658 if !(ValAndOff(x).canAdd(c)) { 13659 break 13660 } 13661 v.reset(OpAMD64MOVWstoreconstidx1) 13662 v.AuxInt = ValAndOff(x).add(c) 13663 v.Aux = sym 13664 v.AddArg(ptr) 13665 v.AddArg(idx) 13666 v.AddArg(mem) 13667 return true 13668 } 13669 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 13670 // cond: ValAndOff(x).canAdd(c) 13671 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13672 for { 13673 x := v.AuxInt 13674 sym := v.Aux 13675 _ = v.Args[2] 13676 ptr := v.Args[0] 13677 v_1 := v.Args[1] 13678 if v_1.Op != OpAMD64ADDQconst { 13679 break 13680 } 13681 c := v_1.AuxInt 13682 idx := v_1.Args[0] 13683 mem := v.Args[2] 13684 if !(ValAndOff(x).canAdd(c)) { 13685 break 13686 } 13687 v.reset(OpAMD64MOVWstoreconstidx1) 13688 v.AuxInt = ValAndOff(x).add(c) 13689 v.Aux = sym 13690 v.AddArg(ptr) 13691 v.AddArg(idx) 13692 v.AddArg(mem) 13693 return true 13694 } 13695 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) 13696 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13697 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) 13698 for { 13699 c := v.AuxInt 13700 s := v.Aux 13701 _ = v.Args[2] 13702 p := v.Args[0] 13703 i := v.Args[1] 13704 x := v.Args[2] 13705 if x.Op != OpAMD64MOVWstoreconstidx1 { 13706 break 13707 } 13708 a := x.AuxInt 13709 if x.Aux != s { 13710 break 13711 } 13712 _ = x.Args[2] 13713 if p != x.Args[0] { 13714 break 13715 } 13716 if i != x.Args[1] { 13717 break 13718 } 13719 mem := x.Args[2] 13720 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13721 break 13722 } 13723 v.reset(OpAMD64MOVLstoreconstidx1) 13724 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, 
ValAndOff(a).Off()) 13725 v.Aux = s 13726 v.AddArg(p) 13727 v.AddArg(i) 13728 v.AddArg(mem) 13729 return true 13730 } 13731 return false 13732 } 13733 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { 13734 b := v.Block 13735 _ = b 13736 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) 13737 // cond: ValAndOff(x).canAdd(c) 13738 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) 13739 for { 13740 x := v.AuxInt 13741 sym := v.Aux 13742 _ = v.Args[2] 13743 v_0 := v.Args[0] 13744 if v_0.Op != OpAMD64ADDQconst { 13745 break 13746 } 13747 c := v_0.AuxInt 13748 ptr := v_0.Args[0] 13749 idx := v.Args[1] 13750 mem := v.Args[2] 13751 if !(ValAndOff(x).canAdd(c)) { 13752 break 13753 } 13754 v.reset(OpAMD64MOVWstoreconstidx2) 13755 v.AuxInt = ValAndOff(x).add(c) 13756 v.Aux = sym 13757 v.AddArg(ptr) 13758 v.AddArg(idx) 13759 v.AddArg(mem) 13760 return true 13761 } 13762 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) 13763 // cond: ValAndOff(x).canAdd(2*c) 13764 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) 13765 for { 13766 x := v.AuxInt 13767 sym := v.Aux 13768 _ = v.Args[2] 13769 ptr := v.Args[0] 13770 v_1 := v.Args[1] 13771 if v_1.Op != OpAMD64ADDQconst { 13772 break 13773 } 13774 c := v_1.AuxInt 13775 idx := v_1.Args[0] 13776 mem := v.Args[2] 13777 if !(ValAndOff(x).canAdd(2 * c)) { 13778 break 13779 } 13780 v.reset(OpAMD64MOVWstoreconstidx2) 13781 v.AuxInt = ValAndOff(x).add(2 * c) 13782 v.Aux = sym 13783 v.AddArg(ptr) 13784 v.AddArg(idx) 13785 v.AddArg(mem) 13786 return true 13787 } 13788 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 13789 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) 13790 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem) 13791 for { 13792 c := v.AuxInt 13793 s := v.Aux 13794 _ = v.Args[2] 13795 p := v.Args[0] 13796 i := v.Args[1] 13797 x := v.Args[2] 13798 if x.Op != OpAMD64MOVWstoreconstidx2 { 13799 break 13800 } 13801 a := x.AuxInt 13802 if x.Aux != s { 13803 break 13804 } 13805 _ = x.Args[2] 13806 if p != x.Args[0] { 13807 break 13808 } 13809 if i != x.Args[1] { 13810 break 13811 } 13812 mem := x.Args[2] 13813 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 13814 break 13815 } 13816 v.reset(OpAMD64MOVLstoreconstidx1) 13817 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) 13818 v.Aux = s 13819 v.AddArg(p) 13820 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) 13821 v0.AuxInt = 1 13822 v0.AddArg(i) 13823 v.AddArg(v0) 13824 v.AddArg(mem) 13825 return true 13826 } 13827 return false 13828 } 13829 func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { 13830 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) 13831 // cond: 13832 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) 13833 for { 13834 c := v.AuxInt 13835 sym := v.Aux 13836 _ = v.Args[3] 13837 ptr := v.Args[0] 13838 v_1 := v.Args[1] 13839 if v_1.Op != OpAMD64SHLQconst { 13840 break 13841 } 13842 if v_1.AuxInt != 1 { 13843 break 13844 } 13845 idx := v_1.Args[0] 13846 val := v.Args[2] 13847 mem := v.Args[3] 13848 v.reset(OpAMD64MOVWstoreidx2) 13849 v.AuxInt = c 13850 v.Aux = sym 13851 v.AddArg(ptr) 13852 v.AddArg(idx) 13853 v.AddArg(val) 13854 v.AddArg(mem) 13855 return true 13856 } 13857 // match: (MOVWstoreidx1 [c] {sym} 
(ADDQconst [d] ptr) idx val mem) 13858 // cond: is32Bit(c+d) 13859 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 13860 for { 13861 c := v.AuxInt 13862 sym := v.Aux 13863 _ = v.Args[3] 13864 v_0 := v.Args[0] 13865 if v_0.Op != OpAMD64ADDQconst { 13866 break 13867 } 13868 d := v_0.AuxInt 13869 ptr := v_0.Args[0] 13870 idx := v.Args[1] 13871 val := v.Args[2] 13872 mem := v.Args[3] 13873 if !(is32Bit(c + d)) { 13874 break 13875 } 13876 v.reset(OpAMD64MOVWstoreidx1) 13877 v.AuxInt = c + d 13878 v.Aux = sym 13879 v.AddArg(ptr) 13880 v.AddArg(idx) 13881 v.AddArg(val) 13882 v.AddArg(mem) 13883 return true 13884 } 13885 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 13886 // cond: is32Bit(c+d) 13887 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) 13888 for { 13889 c := v.AuxInt 13890 sym := v.Aux 13891 _ = v.Args[3] 13892 ptr := v.Args[0] 13893 v_1 := v.Args[1] 13894 if v_1.Op != OpAMD64ADDQconst { 13895 break 13896 } 13897 d := v_1.AuxInt 13898 idx := v_1.Args[0] 13899 val := v.Args[2] 13900 mem := v.Args[3] 13901 if !(is32Bit(c + d)) { 13902 break 13903 } 13904 v.reset(OpAMD64MOVWstoreidx1) 13905 v.AuxInt = c + d 13906 v.Aux = sym 13907 v.AddArg(ptr) 13908 v.AddArg(idx) 13909 v.AddArg(val) 13910 v.AddArg(mem) 13911 return true 13912 } 13913 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) 13914 // cond: x.Uses == 1 && clobber(x) 13915 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) 13916 for { 13917 i := v.AuxInt 13918 s := v.Aux 13919 _ = v.Args[3] 13920 p := v.Args[0] 13921 idx := v.Args[1] 13922 v_2 := v.Args[2] 13923 if v_2.Op != OpAMD64SHRQconst { 13924 break 13925 } 13926 if v_2.AuxInt != 16 { 13927 break 13928 } 13929 w := v_2.Args[0] 13930 x := v.Args[3] 13931 if x.Op != OpAMD64MOVWstoreidx1 { 13932 break 13933 } 13934 if x.AuxInt != i-2 { 13935 break 13936 } 13937 if x.Aux != s { 13938 break 13939 } 13940 _ = x.Args[3] 13941 if p != x.Args[0] { 13942 break 13943 } 13944 if idx != x.Args[1] { 13945 break 13946 } 13947 if w != x.Args[2] { 13948 break 13949 } 13950 mem := x.Args[3] 13951 if !(x.Uses == 1 && clobber(x)) { 13952 break 13953 } 13954 v.reset(OpAMD64MOVLstoreidx1) 13955 v.AuxInt = i - 2 13956 v.Aux = s 13957 v.AddArg(p) 13958 v.AddArg(idx) 13959 v.AddArg(w) 13960 v.AddArg(mem) 13961 return true 13962 } 13963 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 13964 // cond: x.Uses == 1 && clobber(x) 13965 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) 13966 for { 13967 i := v.AuxInt 13968 s := v.Aux 13969 _ = v.Args[3] 13970 p := v.Args[0] 13971 idx := v.Args[1] 13972 v_2 := v.Args[2] 13973 if v_2.Op != OpAMD64SHRQconst { 13974 break 13975 } 13976 j := v_2.AuxInt 13977 w := v_2.Args[0] 13978 x := v.Args[3] 13979 if x.Op != OpAMD64MOVWstoreidx1 { 13980 break 13981 } 13982 if x.AuxInt != i-2 { 13983 break 13984 } 13985 if x.Aux != s { 13986 break 13987 } 13988 _ = x.Args[3] 13989 if p != x.Args[0] { 13990 break 13991 } 13992 if idx != x.Args[1] { 13993 break 13994 } 13995 w0 := x.Args[2] 13996 if w0.Op != OpAMD64SHRQconst { 13997 break 13998 } 13999 if w0.AuxInt != j-16 { 14000 break 14001 } 14002 if w != w0.Args[0] { 14003 break 14004 } 14005 mem := x.Args[3] 14006 if !(x.Uses == 1 && clobber(x)) { 14007 break 14008 } 14009 v.reset(OpAMD64MOVLstoreidx1) 14010 v.AuxInt = i - 2 14011 v.Aux = s 14012 v.AddArg(p) 14013 v.AddArg(idx) 14014 v.AddArg(w0) 14015 v.AddArg(mem) 14016 return true 14017 } 14018 return false 14019 } 
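// The MOVWstoreidx2 rules below parallel the idx1 rules above, except that
// the index register is implicitly scaled by 2: folding an ADDQconst on the
// idx operand must add 2*d to the displacement, guarded by is32Bit(c+2*d).
// For example (illustrative values only), (MOVWstoreidx2 [40] {s} ptr
// (ADDQconst [3] idx) val mem) rewrites to (MOVWstoreidx2 [46] {s} ptr idx
// val mem). When two adjacent 16-bit stores are merged into one 32-bit
// store, the result cannot stay in scale-2 form, so the index is
// re-materialized as (SHLQconst [1] idx) and a MOVLstoreidx1 is emitted.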
14020 func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { 14021 b := v.Block 14022 _ = b 14023 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) 14024 // cond: is32Bit(c+d) 14025 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) 14026 for { 14027 c := v.AuxInt 14028 sym := v.Aux 14029 _ = v.Args[3] 14030 v_0 := v.Args[0] 14031 if v_0.Op != OpAMD64ADDQconst { 14032 break 14033 } 14034 d := v_0.AuxInt 14035 ptr := v_0.Args[0] 14036 idx := v.Args[1] 14037 val := v.Args[2] 14038 mem := v.Args[3] 14039 if !(is32Bit(c + d)) { 14040 break 14041 } 14042 v.reset(OpAMD64MOVWstoreidx2) 14043 v.AuxInt = c + d 14044 v.Aux = sym 14045 v.AddArg(ptr) 14046 v.AddArg(idx) 14047 v.AddArg(val) 14048 v.AddArg(mem) 14049 return true 14050 } 14051 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) 14052 // cond: is32Bit(c+2*d) 14053 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) 14054 for { 14055 c := v.AuxInt 14056 sym := v.Aux 14057 _ = v.Args[3] 14058 ptr := v.Args[0] 14059 v_1 := v.Args[1] 14060 if v_1.Op != OpAMD64ADDQconst { 14061 break 14062 } 14063 d := v_1.AuxInt 14064 idx := v_1.Args[0] 14065 val := v.Args[2] 14066 mem := v.Args[3] 14067 if !(is32Bit(c + 2*d)) { 14068 break 14069 } 14070 v.reset(OpAMD64MOVWstoreidx2) 14071 v.AuxInt = c + 2*d 14072 v.Aux = sym 14073 v.AddArg(ptr) 14074 v.AddArg(idx) 14075 v.AddArg(val) 14076 v.AddArg(mem) 14077 return true 14078 } 14079 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) 14080 // cond: x.Uses == 1 && clobber(x) 14081 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem) 14082 for { 14083 i := v.AuxInt 14084 s := v.Aux 14085 _ = v.Args[3] 14086 p := v.Args[0] 14087 idx := v.Args[1] 14088 v_2 := v.Args[2] 14089 if v_2.Op != OpAMD64SHRQconst { 14090 break 14091 } 14092 if v_2.AuxInt != 16 { 14093 break 14094 } 14095 w := v_2.Args[0] 14096 x := v.Args[3] 14097 if x.Op != OpAMD64MOVWstoreidx2 { 14098 break 14099 } 14100 if x.AuxInt != i-2 { 14101 break 14102 } 14103 if x.Aux != s { 14104 break 14105 } 14106 _ = x.Args[3] 14107 if p != x.Args[0] { 14108 break 14109 } 14110 if idx != x.Args[1] { 14111 break 14112 } 14113 if w != x.Args[2] { 14114 break 14115 } 14116 mem := x.Args[3] 14117 if !(x.Uses == 1 && clobber(x)) { 14118 break 14119 } 14120 v.reset(OpAMD64MOVLstoreidx1) 14121 v.AuxInt = i - 2 14122 v.Aux = s 14123 v.AddArg(p) 14124 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 14125 v0.AuxInt = 1 14126 v0.AddArg(idx) 14127 v.AddArg(v0) 14128 v.AddArg(w) 14129 v.AddArg(mem) 14130 return true 14131 } 14132 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) 14133 // cond: x.Uses == 1 && clobber(x) 14134 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem) 14135 for { 14136 i := v.AuxInt 14137 s := v.Aux 14138 _ = v.Args[3] 14139 p := v.Args[0] 14140 idx := v.Args[1] 14141 v_2 := v.Args[2] 14142 if v_2.Op != OpAMD64SHRQconst { 14143 break 14144 } 14145 j := v_2.AuxInt 14146 w := v_2.Args[0] 14147 x := v.Args[3] 14148 if x.Op != OpAMD64MOVWstoreidx2 { 14149 break 14150 } 14151 if x.AuxInt != i-2 { 14152 break 14153 } 14154 if x.Aux != s { 14155 break 14156 } 14157 _ = x.Args[3] 14158 if p != x.Args[0] { 14159 break 14160 } 14161 if idx != x.Args[1] { 14162 break 14163 } 14164 w0 := x.Args[2] 14165 if w0.Op != OpAMD64SHRQconst { 14166 break 14167 } 14168 if w0.AuxInt != j-16 { 14169 break 14170 } 14171 if w != w0.Args[0] { 14172 
break 14173 } 14174 mem := x.Args[3] 14175 if !(x.Uses == 1 && clobber(x)) { 14176 break 14177 } 14178 v.reset(OpAMD64MOVLstoreidx1) 14179 v.AuxInt = i - 2 14180 v.Aux = s 14181 v.AddArg(p) 14182 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) 14183 v0.AuxInt = 1 14184 v0.AddArg(idx) 14185 v.AddArg(v0) 14186 v.AddArg(w0) 14187 v.AddArg(mem) 14188 return true 14189 } 14190 return false 14191 } 14192 func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool { 14193 // match: (MULL x (MOVLconst [c])) 14194 // cond: 14195 // result: (MULLconst [c] x) 14196 for { 14197 _ = v.Args[1] 14198 x := v.Args[0] 14199 v_1 := v.Args[1] 14200 if v_1.Op != OpAMD64MOVLconst { 14201 break 14202 } 14203 c := v_1.AuxInt 14204 v.reset(OpAMD64MULLconst) 14205 v.AuxInt = c 14206 v.AddArg(x) 14207 return true 14208 } 14209 // match: (MULL (MOVLconst [c]) x) 14210 // cond: 14211 // result: (MULLconst [c] x) 14212 for { 14213 _ = v.Args[1] 14214 v_0 := v.Args[0] 14215 if v_0.Op != OpAMD64MOVLconst { 14216 break 14217 } 14218 c := v_0.AuxInt 14219 x := v.Args[1] 14220 v.reset(OpAMD64MULLconst) 14221 v.AuxInt = c 14222 v.AddArg(x) 14223 return true 14224 } 14225 return false 14226 } 14227 func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool { 14228 // match: (MULLconst [c] (MULLconst [d] x)) 14229 // cond: 14230 // result: (MULLconst [int64(int32(c * d))] x) 14231 for { 14232 c := v.AuxInt 14233 v_0 := v.Args[0] 14234 if v_0.Op != OpAMD64MULLconst { 14235 break 14236 } 14237 d := v_0.AuxInt 14238 x := v_0.Args[0] 14239 v.reset(OpAMD64MULLconst) 14240 v.AuxInt = int64(int32(c * d)) 14241 v.AddArg(x) 14242 return true 14243 } 14244 // match: (MULLconst [c] (MOVLconst [d])) 14245 // cond: 14246 // result: (MOVLconst [int64(int32(c*d))]) 14247 for { 14248 c := v.AuxInt 14249 v_0 := v.Args[0] 14250 if v_0.Op != OpAMD64MOVLconst { 14251 break 14252 } 14253 d := v_0.AuxInt 14254 v.reset(OpAMD64MOVLconst) 14255 v.AuxInt = int64(int32(c * d)) 14256 return true 14257 } 14258 return false 14259 } 14260 func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool { 14261 // match: (MULQ x (MOVQconst [c])) 14262 // cond: is32Bit(c) 14263 // result: (MULQconst [c] x) 14264 for { 14265 _ = v.Args[1] 14266 x := v.Args[0] 14267 v_1 := v.Args[1] 14268 if v_1.Op != OpAMD64MOVQconst { 14269 break 14270 } 14271 c := v_1.AuxInt 14272 if !(is32Bit(c)) { 14273 break 14274 } 14275 v.reset(OpAMD64MULQconst) 14276 v.AuxInt = c 14277 v.AddArg(x) 14278 return true 14279 } 14280 // match: (MULQ (MOVQconst [c]) x) 14281 // cond: is32Bit(c) 14282 // result: (MULQconst [c] x) 14283 for { 14284 _ = v.Args[1] 14285 v_0 := v.Args[0] 14286 if v_0.Op != OpAMD64MOVQconst { 14287 break 14288 } 14289 c := v_0.AuxInt 14290 x := v.Args[1] 14291 if !(is32Bit(c)) { 14292 break 14293 } 14294 v.reset(OpAMD64MULQconst) 14295 v.AuxInt = c 14296 v.AddArg(x) 14297 return true 14298 } 14299 return false 14300 } 14301 func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool { 14302 b := v.Block 14303 _ = b 14304 // match: (MULQconst [c] (MULQconst [d] x)) 14305 // cond: is32Bit(c*d) 14306 // result: (MULQconst [c * d] x) 14307 for { 14308 c := v.AuxInt 14309 v_0 := v.Args[0] 14310 if v_0.Op != OpAMD64MULQconst { 14311 break 14312 } 14313 d := v_0.AuxInt 14314 x := v_0.Args[0] 14315 if !(is32Bit(c * d)) { 14316 break 14317 } 14318 v.reset(OpAMD64MULQconst) 14319 v.AuxInt = c * d 14320 v.AddArg(x) 14321 return true 14322 } 14323 // match: (MULQconst [-1] x) 14324 // cond: 14325 // result: (NEGQ x) 14326 for { 14327 if v.AuxInt != -1 { 14328 break 14329 } 14330 x := v.Args[0] 
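// Multiplying by -1 reduces to negation. The constant cases that follow use
// the LEAQ family for strength reduction: LEAQ2 x y computes x+2*y, LEAQ4
// x+4*y, and LEAQ8 x+8*y, so e.g. c=3 becomes (LEAQ2 x x) and c=11 becomes
// (LEAQ2 x (LEAQ4 x x)); constants near a small multiple of a power of two
// combine one SHLQconst with a LEAQ or SUBQ instead of an IMUL.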
14331 v.reset(OpAMD64NEGQ) 14332 v.AddArg(x) 14333 return true 14334 } 14335 // match: (MULQconst [0] _) 14336 // cond: 14337 // result: (MOVQconst [0]) 14338 for { 14339 if v.AuxInt != 0 { 14340 break 14341 } 14342 v.reset(OpAMD64MOVQconst) 14343 v.AuxInt = 0 14344 return true 14345 } 14346 // match: (MULQconst [1] x) 14347 // cond: 14348 // result: x 14349 for { 14350 if v.AuxInt != 1 { 14351 break 14352 } 14353 x := v.Args[0] 14354 v.reset(OpCopy) 14355 v.Type = x.Type 14356 v.AddArg(x) 14357 return true 14358 } 14359 // match: (MULQconst [3] x) 14360 // cond: 14361 // result: (LEAQ2 x x) 14362 for { 14363 if v.AuxInt != 3 { 14364 break 14365 } 14366 x := v.Args[0] 14367 v.reset(OpAMD64LEAQ2) 14368 v.AddArg(x) 14369 v.AddArg(x) 14370 return true 14371 } 14372 // match: (MULQconst [5] x) 14373 // cond: 14374 // result: (LEAQ4 x x) 14375 for { 14376 if v.AuxInt != 5 { 14377 break 14378 } 14379 x := v.Args[0] 14380 v.reset(OpAMD64LEAQ4) 14381 v.AddArg(x) 14382 v.AddArg(x) 14383 return true 14384 } 14385 // match: (MULQconst [7] x) 14386 // cond: 14387 // result: (LEAQ8 (NEGQ <v.Type> x) x) 14388 for { 14389 if v.AuxInt != 7 { 14390 break 14391 } 14392 x := v.Args[0] 14393 v.reset(OpAMD64LEAQ8) 14394 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) 14395 v0.AddArg(x) 14396 v.AddArg(v0) 14397 v.AddArg(x) 14398 return true 14399 } 14400 // match: (MULQconst [9] x) 14401 // cond: 14402 // result: (LEAQ8 x x) 14403 for { 14404 if v.AuxInt != 9 { 14405 break 14406 } 14407 x := v.Args[0] 14408 v.reset(OpAMD64LEAQ8) 14409 v.AddArg(x) 14410 v.AddArg(x) 14411 return true 14412 } 14413 // match: (MULQconst [11] x) 14414 // cond: 14415 // result: (LEAQ2 x (LEAQ4 <v.Type> x x)) 14416 for { 14417 if v.AuxInt != 11 { 14418 break 14419 } 14420 x := v.Args[0] 14421 v.reset(OpAMD64LEAQ2) 14422 v.AddArg(x) 14423 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14424 v0.AddArg(x) 14425 v0.AddArg(x) 14426 v.AddArg(v0) 14427 return true 14428 } 14429 // match: (MULQconst [13] x) 14430 // cond: 14431 // result: (LEAQ4 x (LEAQ2 <v.Type> x x)) 14432 for { 14433 if v.AuxInt != 13 { 14434 break 14435 } 14436 x := v.Args[0] 14437 v.reset(OpAMD64LEAQ4) 14438 v.AddArg(x) 14439 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14440 v0.AddArg(x) 14441 v0.AddArg(x) 14442 v.AddArg(v0) 14443 return true 14444 } 14445 return false 14446 } 14447 func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { 14448 b := v.Block 14449 _ = b 14450 // match: (MULQconst [21] x) 14451 // cond: 14452 // result: (LEAQ4 x (LEAQ4 <v.Type> x x)) 14453 for { 14454 if v.AuxInt != 21 { 14455 break 14456 } 14457 x := v.Args[0] 14458 v.reset(OpAMD64LEAQ4) 14459 v.AddArg(x) 14460 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14461 v0.AddArg(x) 14462 v0.AddArg(x) 14463 v.AddArg(v0) 14464 return true 14465 } 14466 // match: (MULQconst [25] x) 14467 // cond: 14468 // result: (LEAQ8 x (LEAQ2 <v.Type> x x)) 14469 for { 14470 if v.AuxInt != 25 { 14471 break 14472 } 14473 x := v.Args[0] 14474 v.reset(OpAMD64LEAQ8) 14475 v.AddArg(x) 14476 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14477 v0.AddArg(x) 14478 v0.AddArg(x) 14479 v.AddArg(v0) 14480 return true 14481 } 14482 // match: (MULQconst [37] x) 14483 // cond: 14484 // result: (LEAQ4 x (LEAQ8 <v.Type> x x)) 14485 for { 14486 if v.AuxInt != 37 { 14487 break 14488 } 14489 x := v.Args[0] 14490 v.reset(OpAMD64LEAQ4) 14491 v.AddArg(x) 14492 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14493 v0.AddArg(x) 14494 v0.AddArg(x) 14495 v.AddArg(v0) 14496 return true 14497 } 14498 // match: (MULQconst [41] x) 14499 
// cond: 14500 // result: (LEAQ8 x (LEAQ4 <v.Type> x x)) 14501 for { 14502 if v.AuxInt != 41 { 14503 break 14504 } 14505 x := v.Args[0] 14506 v.reset(OpAMD64LEAQ8) 14507 v.AddArg(x) 14508 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14509 v0.AddArg(x) 14510 v0.AddArg(x) 14511 v.AddArg(v0) 14512 return true 14513 } 14514 // match: (MULQconst [73] x) 14515 // cond: 14516 // result: (LEAQ8 x (LEAQ8 <v.Type> x x)) 14517 for { 14518 if v.AuxInt != 73 { 14519 break 14520 } 14521 x := v.Args[0] 14522 v.reset(OpAMD64LEAQ8) 14523 v.AddArg(x) 14524 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14525 v0.AddArg(x) 14526 v0.AddArg(x) 14527 v.AddArg(v0) 14528 return true 14529 } 14530 // match: (MULQconst [c] x) 14531 // cond: isPowerOfTwo(c+1) && c >= 15 14532 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x) 14533 for { 14534 c := v.AuxInt 14535 x := v.Args[0] 14536 if !(isPowerOfTwo(c+1) && c >= 15) { 14537 break 14538 } 14539 v.reset(OpAMD64SUBQ) 14540 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14541 v0.AuxInt = log2(c + 1) 14542 v0.AddArg(x) 14543 v.AddArg(v0) 14544 v.AddArg(x) 14545 return true 14546 } 14547 // match: (MULQconst [c] x) 14548 // cond: isPowerOfTwo(c-1) && c >= 17 14549 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x) 14550 for { 14551 c := v.AuxInt 14552 x := v.Args[0] 14553 if !(isPowerOfTwo(c-1) && c >= 17) { 14554 break 14555 } 14556 v.reset(OpAMD64LEAQ1) 14557 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14558 v0.AuxInt = log2(c - 1) 14559 v0.AddArg(x) 14560 v.AddArg(v0) 14561 v.AddArg(x) 14562 return true 14563 } 14564 // match: (MULQconst [c] x) 14565 // cond: isPowerOfTwo(c-2) && c >= 34 14566 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x) 14567 for { 14568 c := v.AuxInt 14569 x := v.Args[0] 14570 if !(isPowerOfTwo(c-2) && c >= 34) { 14571 break 14572 } 14573 v.reset(OpAMD64LEAQ2) 14574 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14575 v0.AuxInt = log2(c - 2) 14576 v0.AddArg(x) 14577 v.AddArg(v0) 14578 v.AddArg(x) 14579 return true 14580 } 14581 // match: (MULQconst [c] x) 14582 // cond: isPowerOfTwo(c-4) && c >= 68 14583 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x) 14584 for { 14585 c := v.AuxInt 14586 x := v.Args[0] 14587 if !(isPowerOfTwo(c-4) && c >= 68) { 14588 break 14589 } 14590 v.reset(OpAMD64LEAQ4) 14591 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14592 v0.AuxInt = log2(c - 4) 14593 v0.AddArg(x) 14594 v.AddArg(v0) 14595 v.AddArg(x) 14596 return true 14597 } 14598 // match: (MULQconst [c] x) 14599 // cond: isPowerOfTwo(c-8) && c >= 136 14600 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x) 14601 for { 14602 c := v.AuxInt 14603 x := v.Args[0] 14604 if !(isPowerOfTwo(c-8) && c >= 136) { 14605 break 14606 } 14607 v.reset(OpAMD64LEAQ8) 14608 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 14609 v0.AuxInt = log2(c - 8) 14610 v0.AddArg(x) 14611 v.AddArg(v0) 14612 v.AddArg(x) 14613 return true 14614 } 14615 return false 14616 } 14617 func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { 14618 b := v.Block 14619 _ = b 14620 // match: (MULQconst [c] x) 14621 // cond: c%3 == 0 && isPowerOfTwo(c/3) 14622 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x)) 14623 for { 14624 c := v.AuxInt 14625 x := v.Args[0] 14626 if !(c%3 == 0 && isPowerOfTwo(c/3)) { 14627 break 14628 } 14629 v.reset(OpAMD64SHLQconst) 14630 v.AuxInt = log2(c / 3) 14631 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) 14632 v0.AddArg(x) 14633 v0.AddArg(x) 14634 v.AddArg(v0) 14635 return true 14636 } 14637 // match: (MULQconst [c] x) 
14638 // cond: c%5 == 0 && isPowerOfTwo(c/5) 14639 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x)) 14640 for { 14641 c := v.AuxInt 14642 x := v.Args[0] 14643 if !(c%5 == 0 && isPowerOfTwo(c/5)) { 14644 break 14645 } 14646 v.reset(OpAMD64SHLQconst) 14647 v.AuxInt = log2(c / 5) 14648 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) 14649 v0.AddArg(x) 14650 v0.AddArg(x) 14651 v.AddArg(v0) 14652 return true 14653 } 14654 // match: (MULQconst [c] x) 14655 // cond: c%9 == 0 && isPowerOfTwo(c/9) 14656 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x)) 14657 for { 14658 c := v.AuxInt 14659 x := v.Args[0] 14660 if !(c%9 == 0 && isPowerOfTwo(c/9)) { 14661 break 14662 } 14663 v.reset(OpAMD64SHLQconst) 14664 v.AuxInt = log2(c / 9) 14665 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) 14666 v0.AddArg(x) 14667 v0.AddArg(x) 14668 v.AddArg(v0) 14669 return true 14670 } 14671 // match: (MULQconst [c] (MOVQconst [d])) 14672 // cond: 14673 // result: (MOVQconst [c*d]) 14674 for { 14675 c := v.AuxInt 14676 v_0 := v.Args[0] 14677 if v_0.Op != OpAMD64MOVQconst { 14678 break 14679 } 14680 d := v_0.AuxInt 14681 v.reset(OpAMD64MOVQconst) 14682 v.AuxInt = c * d 14683 return true 14684 } 14685 return false 14686 } 14687 func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { 14688 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) 14689 // cond: canMergeLoad(v, l, x) && clobber(l) 14690 // result: (MULSDmem x [off] {sym} ptr mem) 14691 for { 14692 _ = v.Args[1] 14693 x := v.Args[0] 14694 l := v.Args[1] 14695 if l.Op != OpAMD64MOVSDload { 14696 break 14697 } 14698 off := l.AuxInt 14699 sym := l.Aux 14700 _ = l.Args[1] 14701 ptr := l.Args[0] 14702 mem := l.Args[1] 14703 if !(canMergeLoad(v, l, x) && clobber(l)) { 14704 break 14705 } 14706 v.reset(OpAMD64MULSDmem) 14707 v.AuxInt = off 14708 v.Aux = sym 14709 v.AddArg(x) 14710 v.AddArg(ptr) 14711 v.AddArg(mem) 14712 return true 14713 } 14714 // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) 14715 // cond: canMergeLoad(v, l, x) && clobber(l) 14716 // result: (MULSDmem x [off] {sym} ptr mem) 14717 for { 14718 _ = v.Args[1] 14719 l := v.Args[0] 14720 if l.Op != OpAMD64MOVSDload { 14721 break 14722 } 14723 off := l.AuxInt 14724 sym := l.Aux 14725 _ = l.Args[1] 14726 ptr := l.Args[0] 14727 mem := l.Args[1] 14728 x := v.Args[1] 14729 if !(canMergeLoad(v, l, x) && clobber(l)) { 14730 break 14731 } 14732 v.reset(OpAMD64MULSDmem) 14733 v.AuxInt = off 14734 v.Aux = sym 14735 v.AddArg(x) 14736 v.AddArg(ptr) 14737 v.AddArg(mem) 14738 return true 14739 } 14740 return false 14741 } 14742 func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool { 14743 b := v.Block 14744 _ = b 14745 typ := &b.Func.Config.Types 14746 _ = typ 14747 // match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 14748 // cond: 14749 // result: (MULSD x (MOVQi2f y)) 14750 for { 14751 off := v.AuxInt 14752 sym := v.Aux 14753 _ = v.Args[2] 14754 x := v.Args[0] 14755 ptr := v.Args[1] 14756 v_2 := v.Args[2] 14757 if v_2.Op != OpAMD64MOVQstore { 14758 break 14759 } 14760 if v_2.AuxInt != off { 14761 break 14762 } 14763 if v_2.Aux != sym { 14764 break 14765 } 14766 _ = v_2.Args[2] 14767 if ptr != v_2.Args[0] { 14768 break 14769 } 14770 y := v_2.Args[1] 14771 v.reset(OpAMD64MULSD) 14772 v.AddArg(x) 14773 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 14774 v0.AddArg(y) 14775 v.AddArg(v0) 14776 return true 14777 } 14778 return false 14779 } 14780 func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { 14781 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) 14782 // cond: 
canMergeLoad(v, l, x) && clobber(l) 14783 // result: (MULSSmem x [off] {sym} ptr mem) 14784 for { 14785 _ = v.Args[1] 14786 x := v.Args[0] 14787 l := v.Args[1] 14788 if l.Op != OpAMD64MOVSSload { 14789 break 14790 } 14791 off := l.AuxInt 14792 sym := l.Aux 14793 _ = l.Args[1] 14794 ptr := l.Args[0] 14795 mem := l.Args[1] 14796 if !(canMergeLoad(v, l, x) && clobber(l)) { 14797 break 14798 } 14799 v.reset(OpAMD64MULSSmem) 14800 v.AuxInt = off 14801 v.Aux = sym 14802 v.AddArg(x) 14803 v.AddArg(ptr) 14804 v.AddArg(mem) 14805 return true 14806 } 14807 // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) 14808 // cond: canMergeLoad(v, l, x) && clobber(l) 14809 // result: (MULSSmem x [off] {sym} ptr mem) 14810 for { 14811 _ = v.Args[1] 14812 l := v.Args[0] 14813 if l.Op != OpAMD64MOVSSload { 14814 break 14815 } 14816 off := l.AuxInt 14817 sym := l.Aux 14818 _ = l.Args[1] 14819 ptr := l.Args[0] 14820 mem := l.Args[1] 14821 x := v.Args[1] 14822 if !(canMergeLoad(v, l, x) && clobber(l)) { 14823 break 14824 } 14825 v.reset(OpAMD64MULSSmem) 14826 v.AuxInt = off 14827 v.Aux = sym 14828 v.AddArg(x) 14829 v.AddArg(ptr) 14830 v.AddArg(mem) 14831 return true 14832 } 14833 return false 14834 } 14835 func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool { 14836 b := v.Block 14837 _ = b 14838 typ := &b.Func.Config.Types 14839 _ = typ 14840 // match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 14841 // cond: 14842 // result: (MULSS x (MOVLi2f y)) 14843 for { 14844 off := v.AuxInt 14845 sym := v.Aux 14846 _ = v.Args[2] 14847 x := v.Args[0] 14848 ptr := v.Args[1] 14849 v_2 := v.Args[2] 14850 if v_2.Op != OpAMD64MOVLstore { 14851 break 14852 } 14853 if v_2.AuxInt != off { 14854 break 14855 } 14856 if v_2.Aux != sym { 14857 break 14858 } 14859 _ = v_2.Args[2] 14860 if ptr != v_2.Args[0] { 14861 break 14862 } 14863 y := v_2.Args[1] 14864 v.reset(OpAMD64MULSS) 14865 v.AddArg(x) 14866 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 14867 v0.AddArg(y) 14868 v.AddArg(v0) 14869 return true 14870 } 14871 return false 14872 } 14873 func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { 14874 // match: (NEGL (MOVLconst [c])) 14875 // cond: 14876 // result: (MOVLconst [int64(int32(-c))]) 14877 for { 14878 v_0 := v.Args[0] 14879 if v_0.Op != OpAMD64MOVLconst { 14880 break 14881 } 14882 c := v_0.AuxInt 14883 v.reset(OpAMD64MOVLconst) 14884 v.AuxInt = int64(int32(-c)) 14885 return true 14886 } 14887 return false 14888 } 14889 func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool { 14890 // match: (NEGQ (MOVQconst [c])) 14891 // cond: 14892 // result: (MOVQconst [-c]) 14893 for { 14894 v_0 := v.Args[0] 14895 if v_0.Op != OpAMD64MOVQconst { 14896 break 14897 } 14898 c := v_0.AuxInt 14899 v.reset(OpAMD64MOVQconst) 14900 v.AuxInt = -c 14901 return true 14902 } 14903 // match: (NEGQ (ADDQconst [c] (NEGQ x))) 14904 // cond: c != -(1<<31) 14905 // result: (ADDQconst [-c] x) 14906 for { 14907 v_0 := v.Args[0] 14908 if v_0.Op != OpAMD64ADDQconst { 14909 break 14910 } 14911 c := v_0.AuxInt 14912 v_0_0 := v_0.Args[0] 14913 if v_0_0.Op != OpAMD64NEGQ { 14914 break 14915 } 14916 x := v_0_0.Args[0] 14917 if !(c != -(1 << 31)) { 14918 break 14919 } 14920 v.reset(OpAMD64ADDQconst) 14921 v.AuxInt = -c 14922 v.AddArg(x) 14923 return true 14924 } 14925 return false 14926 } 14927 func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool { 14928 // match: (NOTL (MOVLconst [c])) 14929 // cond: 14930 // result: (MOVLconst [^c]) 14931 for { 14932 v_0 := v.Args[0] 14933 if v_0.Op != OpAMD64MOVLconst { 14934 break 14935 } 14936 c := 
func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool {
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool {
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
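// In the second NEGQ rule, -(c + (-x)) simplifies to x + (-c). The
// guard c != -(1<<31) matters because ADDQconst immediates are 32-bit
// sign-extended: -c would not fit when c is exactly -1<<31.
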
func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool {
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool {
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool {
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
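	// The six rules above apply the rotate identity
	// (x << c) | (x >> (w-c)) == RotateLeft(x, c) at widths w = 32, 16
	// and 8. For instance, (x<<24)|(x>>8) on a 32-bit value collapses
	// to ROLLconst x [24], a single rotate instruction.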
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
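// The two rules closing ORL_0, and most of ORL_10 through ORL_40,
// recognize rotates by a variable amount. The generic expansion of a
// 32-bit rotate is x<<y | (x >> -y&31) & mask, where SBBLcarrymask
// turns the carry from CMPQconst [32] into all ones or all zeros:
// comparing 32-(y&31) against 32 leaves the mask all ones exactly when
// y&31 != 0, and otherwise zeroes out the out-of-range SHRL term.
// Matching the whole expression lets it collapse to a single ROLL.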
func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
	// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
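	// ORL and ANDL are commutative, so the rule generator emits every
	// operand ordering of the same shape as a separate rule; the
	// neighboring rules are all permutations of one pattern, in both
	// NEGQ (64-bit count arithmetic) and NEGL (32-bit) flavors.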
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
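	// The remaining ORL_10 rules are the mirror image: a right shift
	// by y combined with the masked left shift by -y&31 collapses to
	// RORL x y.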
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
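// rewriteValueAMD64_OpAMD64ORL_20 continues with the RORL permutations
// whose count arithmetic uses 32-bit NEGL/ADDLconst, then begins the
// 16-bit rotates. Sub-word rotate patterns pre-mask the count (y&15,
// and later y&7) because word/byte shifts by counts at or above the
// width are not well defined, and they check v.Type.Size() so the
// rewrite only fires when just the low bits of the result are used.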
func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
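	// Next are the 16-bit left-rotate patterns: the count is
	// pre-masked to y&15, the complementary count is NEG((y&15)-16),
	// and the same SBBLcarrymask trick zeroes the SHRW term when
	// y&15 == 0. The v.Type.Size() == 2 condition ensures only the low
	// 16 bits of the result matter, so the pair folds to ROLW x y.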
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
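	// The same ROLW shapes repeat below with 32-bit count arithmetic
	// (ANDLconst/NEGL/ADDLconst/CMPLconst), matching code whose shift
	// count was computed in a 32-bit register.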
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
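// rewriteValueAMD64_OpAMD64ORL_30 finishes the 16-bit cases. Note that
// the RORW patterns need no SBBLcarrymask: when y&15 == 0 the SHLL
// term shifts by 16, so its low 16 bits are zero anyway and ORing it
// in is harmless for a 2-byte result. The 8-bit ROLB patterns follow,
// masking the count with y&7 and comparing against 8.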
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
	// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
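	// The byte rotates below mirror the 16-bit structure exactly:
	// count masked to y&7, complement NEG((y&7)-8), carry-mask
	// comparison against 8, and v.Type.Size() == 1.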
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL (SHLL x (ANDLconst y [7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [7]) [-8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [7]) [-8])) [8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [7]) [-8])) [8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [7]) [-8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [7]) [-8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [7]) [-8])) [8]))) (SHLL x (ANDLconst y [7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [7]) [-8])) [8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [7]) [-8])))) (SHLL x (ANDLconst y [7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
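		// The first ANDL operand must be the SBBLcarrymask that zeroes the
		// SHRB result when the masked rotate count is zero.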
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDQconst y [7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [7]) [-8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [7]) [-8]))) (SHRB x (ANDQconst y [7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDLconst y [7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [7]) [-8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [7]) [-8]))) (SHRB x (ANDLconst y [7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
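			// Conditions failed: the halves are not adjacent in memory or
			// have extra uses, so the loads cannot be merged safely.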
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
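// The _70 and _80 helpers below continue the little-endian load-combining
// rules: two indexed loads at offsets i0 and i0+1 (or i0+2), one shifted
// left, merge into a single wider indexed load when every intermediate
// value has exactly one use. mergePoint picks a block where the combined
// load can be placed; clobber marks the matched values dead so they are
// removed by later dead-code elimination.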
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
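		// Both loads here use the idx,p operand order; the rewrite still
		// emits the merged load in the canonical p,idx form.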
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
x0.Args[2] 19881 if p != x0.Args[0] { 19882 break 19883 } 19884 if idx != x0.Args[1] { 19885 break 19886 } 19887 if mem != x0.Args[2] { 19888 break 19889 } 19890 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19891 break 19892 } 19893 b = mergePoint(b, x0, x1) 19894 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19895 v.reset(OpCopy) 19896 v.AddArg(v0) 19897 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19898 v1.AuxInt = j0 19899 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19900 v2.AuxInt = i0 19901 v2.Aux = s 19902 v2.AddArg(p) 19903 v2.AddArg(idx) 19904 v2.AddArg(mem) 19905 v1.AddArg(v2) 19906 v0.AddArg(v1) 19907 v0.AddArg(y) 19908 return true 19909 } 19910 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) 19911 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19912 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19913 for { 19914 _ = v.Args[1] 19915 s1 := v.Args[0] 19916 if s1.Op != OpAMD64SHLLconst { 19917 break 19918 } 19919 j1 := s1.AuxInt 19920 x1 := s1.Args[0] 19921 if x1.Op != OpAMD64MOVBloadidx1 { 19922 break 19923 } 19924 i1 := x1.AuxInt 19925 s := x1.Aux 19926 _ = x1.Args[2] 19927 idx := x1.Args[0] 19928 p := x1.Args[1] 19929 mem := x1.Args[2] 19930 or := v.Args[1] 19931 if or.Op != OpAMD64ORL { 19932 break 19933 } 19934 _ = or.Args[1] 19935 y := or.Args[0] 19936 s0 := or.Args[1] 19937 if s0.Op != OpAMD64SHLLconst { 19938 break 19939 } 19940 j0 := s0.AuxInt 19941 x0 := s0.Args[0] 19942 if x0.Op != OpAMD64MOVBloadidx1 { 19943 break 19944 } 19945 i0 := x0.AuxInt 19946 if x0.Aux != s { 19947 break 19948 } 19949 _ = x0.Args[2] 19950 if p != x0.Args[0] { 19951 break 19952 } 19953 if idx != x0.Args[1] { 19954 break 19955 } 19956 if mem != x0.Args[2] { 19957 break 19958 } 19959 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19960 break 19961 } 19962 b = mergePoint(b, x0, x1) 19963 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19964 v.reset(OpCopy) 19965 v.AddArg(v0) 19966 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19967 v1.AuxInt = j0 19968 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19969 v2.AuxInt = i0 19970 v2.Aux = s 19971 v2.AddArg(p) 19972 v2.AddArg(idx) 19973 v2.AddArg(mem) 19974 v1.AddArg(v2) 19975 v0.AddArg(v1) 19976 v0.AddArg(y) 19977 return true 19978 } 19979 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 19980 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19981 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19982 for { 19983 _ = v.Args[1] 19984 s1 := v.Args[0] 19985 if s1.Op != OpAMD64SHLLconst { 
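			// Each commuted variant of this rule starts the same way:
			// the first operand must be a constant left shift of a
			// one-byte indexed load, otherwise the pattern cannot match.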
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
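			// Differing Aux symbols mean the two byte loads target
			// different objects, so they cannot fuse into one MOVWload.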
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
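		// The new ORL keeps the rebuilt shift chain (SHLLconst of the
		// byte-swapped word load) as its first operand and the untouched
		// value y as its second, matching the result pattern above.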
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
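			// The shifted operand must itself be a one-byte indexed
			// load for the ROLWconst rewrite; anything else bails out.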
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
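		// The two word loads line up structurally; the condition below
		// additionally requires adjacent offsets (i1 == i0+2), single
		// uses, and a common merge point before fusing them.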
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
22204 v.reset(OpCopy) 22205 v.AddArg(v0) 22206 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 22207 v1.AuxInt = i0 22208 v1.Aux = s 22209 v1.AddArg(p) 22210 v1.AddArg(idx) 22211 v1.AddArg(mem) 22212 v0.AddArg(v1) 22213 return true 22214 } 22215 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 22216 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22217 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22218 for { 22219 _ = v.Args[1] 22220 s0 := v.Args[0] 22221 if s0.Op != OpAMD64SHLLconst { 22222 break 22223 } 22224 j0 := s0.AuxInt 22225 x0 := s0.Args[0] 22226 if x0.Op != OpAMD64MOVBloadidx1 { 22227 break 22228 } 22229 i0 := x0.AuxInt 22230 s := x0.Aux 22231 _ = x0.Args[2] 22232 p := x0.Args[0] 22233 idx := x0.Args[1] 22234 mem := x0.Args[2] 22235 or := v.Args[1] 22236 if or.Op != OpAMD64ORL { 22237 break 22238 } 22239 _ = or.Args[1] 22240 s1 := or.Args[0] 22241 if s1.Op != OpAMD64SHLLconst { 22242 break 22243 } 22244 j1 := s1.AuxInt 22245 x1 := s1.Args[0] 22246 if x1.Op != OpAMD64MOVBloadidx1 { 22247 break 22248 } 22249 i1 := x1.AuxInt 22250 if x1.Aux != s { 22251 break 22252 } 22253 _ = x1.Args[2] 22254 if p != x1.Args[0] { 22255 break 22256 } 22257 if idx != x1.Args[1] { 22258 break 22259 } 22260 if mem != x1.Args[2] { 22261 break 22262 } 22263 y := or.Args[1] 22264 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22265 break 22266 } 22267 b = mergePoint(b, x0, x1) 22268 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22269 v.reset(OpCopy) 22270 v.AddArg(v0) 22271 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22272 v1.AuxInt = j1 22273 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22274 v2.AuxInt = 8 22275 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22276 v3.AuxInt = i0 22277 v3.Aux = s 22278 v3.AddArg(p) 22279 v3.AddArg(idx) 22280 v3.AddArg(mem) 22281 v2.AddArg(v3) 22282 v1.AddArg(v2) 22283 v0.AddArg(v1) 22284 v0.AddArg(y) 22285 return true 22286 } 22287 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 22288 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22289 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22290 for { 22291 _ = v.Args[1] 22292 s0 := v.Args[0] 22293 if s0.Op != OpAMD64SHLLconst { 22294 break 22295 } 22296 j0 := s0.AuxInt 22297 x0 := s0.Args[0] 22298 if x0.Op != OpAMD64MOVBloadidx1 { 22299 break 22300 } 22301 i0 := x0.AuxInt 22302 s := x0.Aux 22303 _ = x0.Args[2] 22304 idx := x0.Args[0] 22305 p := x0.Args[1] 22306 mem := x0.Args[2] 22307 or := v.Args[1] 22308 if or.Op != OpAMD64ORL { 22309 break 22310 } 22311 _ = or.Args[1] 22312 s1 := or.Args[0] 22313 if s1.Op != OpAMD64SHLLconst { 22314 break 22315 } 22316 j1 := s1.AuxInt 22317 
x1 := s1.Args[0] 22318 if x1.Op != OpAMD64MOVBloadidx1 { 22319 break 22320 } 22321 i1 := x1.AuxInt 22322 if x1.Aux != s { 22323 break 22324 } 22325 _ = x1.Args[2] 22326 if p != x1.Args[0] { 22327 break 22328 } 22329 if idx != x1.Args[1] { 22330 break 22331 } 22332 if mem != x1.Args[2] { 22333 break 22334 } 22335 y := or.Args[1] 22336 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22337 break 22338 } 22339 b = mergePoint(b, x0, x1) 22340 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22341 v.reset(OpCopy) 22342 v.AddArg(v0) 22343 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22344 v1.AuxInt = j1 22345 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22346 v2.AuxInt = 8 22347 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22348 v3.AuxInt = i0 22349 v3.Aux = s 22350 v3.AddArg(p) 22351 v3.AddArg(idx) 22352 v3.AddArg(mem) 22353 v2.AddArg(v3) 22354 v1.AddArg(v2) 22355 v0.AddArg(v1) 22356 v0.AddArg(y) 22357 return true 22358 } 22359 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 22360 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22361 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22362 for { 22363 _ = v.Args[1] 22364 s0 := v.Args[0] 22365 if s0.Op != OpAMD64SHLLconst { 22366 break 22367 } 22368 j0 := s0.AuxInt 22369 x0 := s0.Args[0] 22370 if x0.Op != OpAMD64MOVBloadidx1 { 22371 break 22372 } 22373 i0 := x0.AuxInt 22374 s := x0.Aux 22375 _ = x0.Args[2] 22376 p := x0.Args[0] 22377 idx := x0.Args[1] 22378 mem := x0.Args[2] 22379 or := v.Args[1] 22380 if or.Op != OpAMD64ORL { 22381 break 22382 } 22383 _ = or.Args[1] 22384 s1 := or.Args[0] 22385 if s1.Op != OpAMD64SHLLconst { 22386 break 22387 } 22388 j1 := s1.AuxInt 22389 x1 := s1.Args[0] 22390 if x1.Op != OpAMD64MOVBloadidx1 { 22391 break 22392 } 22393 i1 := x1.AuxInt 22394 if x1.Aux != s { 22395 break 22396 } 22397 _ = x1.Args[2] 22398 if idx != x1.Args[0] { 22399 break 22400 } 22401 if p != x1.Args[1] { 22402 break 22403 } 22404 if mem != x1.Args[2] { 22405 break 22406 } 22407 y := or.Args[1] 22408 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22409 break 22410 } 22411 b = mergePoint(b, x0, x1) 22412 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22413 v.reset(OpCopy) 22414 v.AddArg(v0) 22415 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22416 v1.AuxInt = j1 22417 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22418 v2.AuxInt = 8 22419 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22420 v3.AuxInt = i0 22421 v3.Aux = s 22422 v3.AddArg(p) 22423 v3.AddArg(idx) 22424 v3.AddArg(mem) 22425 v2.AddArg(v3) 22426 v1.AddArg(v2) 22427 v0.AddArg(v1) 22428 v0.AddArg(y) 22429 return true 22430 } 22431 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 22432 // cond: i1 == i0+1 && j1 == j0-8 && 
j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22433 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22434 for { 22435 _ = v.Args[1] 22436 s0 := v.Args[0] 22437 if s0.Op != OpAMD64SHLLconst { 22438 break 22439 } 22440 j0 := s0.AuxInt 22441 x0 := s0.Args[0] 22442 if x0.Op != OpAMD64MOVBloadidx1 { 22443 break 22444 } 22445 i0 := x0.AuxInt 22446 s := x0.Aux 22447 _ = x0.Args[2] 22448 idx := x0.Args[0] 22449 p := x0.Args[1] 22450 mem := x0.Args[2] 22451 or := v.Args[1] 22452 if or.Op != OpAMD64ORL { 22453 break 22454 } 22455 _ = or.Args[1] 22456 s1 := or.Args[0] 22457 if s1.Op != OpAMD64SHLLconst { 22458 break 22459 } 22460 j1 := s1.AuxInt 22461 x1 := s1.Args[0] 22462 if x1.Op != OpAMD64MOVBloadidx1 { 22463 break 22464 } 22465 i1 := x1.AuxInt 22466 if x1.Aux != s { 22467 break 22468 } 22469 _ = x1.Args[2] 22470 if idx != x1.Args[0] { 22471 break 22472 } 22473 if p != x1.Args[1] { 22474 break 22475 } 22476 if mem != x1.Args[2] { 22477 break 22478 } 22479 y := or.Args[1] 22480 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22481 break 22482 } 22483 b = mergePoint(b, x0, x1) 22484 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22485 v.reset(OpCopy) 22486 v.AddArg(v0) 22487 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22488 v1.AuxInt = j1 22489 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22490 v2.AuxInt = 8 22491 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22492 v3.AuxInt = i0 22493 v3.Aux = s 22494 v3.AddArg(p) 22495 v3.AddArg(idx) 22496 v3.AddArg(mem) 22497 v2.AddArg(v3) 22498 v1.AddArg(v2) 22499 v0.AddArg(v1) 22500 v0.AddArg(y) 22501 return true 22502 } 22503 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 22504 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22505 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22506 for { 22507 _ = v.Args[1] 22508 s0 := v.Args[0] 22509 if s0.Op != OpAMD64SHLLconst { 22510 break 22511 } 22512 j0 := s0.AuxInt 22513 x0 := s0.Args[0] 22514 if x0.Op != OpAMD64MOVBloadidx1 { 22515 break 22516 } 22517 i0 := x0.AuxInt 22518 s := x0.Aux 22519 _ = x0.Args[2] 22520 p := x0.Args[0] 22521 idx := x0.Args[1] 22522 mem := x0.Args[2] 22523 or := v.Args[1] 22524 if or.Op != OpAMD64ORL { 22525 break 22526 } 22527 _ = or.Args[1] 22528 y := or.Args[0] 22529 s1 := or.Args[1] 22530 if s1.Op != OpAMD64SHLLconst { 22531 break 22532 } 22533 j1 := s1.AuxInt 22534 x1 := s1.Args[0] 22535 if x1.Op != OpAMD64MOVBloadidx1 { 22536 break 22537 } 22538 i1 := x1.AuxInt 22539 if x1.Aux != s { 22540 break 22541 } 22542 _ = x1.Args[2] 22543 if p != x1.Args[0] { 22544 break 22545 } 22546 if idx != x1.Args[1] { 22547 break 22548 } 22549 if mem != x1.Args[2] { 22550 break 22551 } 22552 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 
1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22553 break 22554 } 22555 b = mergePoint(b, x0, x1) 22556 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22557 v.reset(OpCopy) 22558 v.AddArg(v0) 22559 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22560 v1.AuxInt = j1 22561 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22562 v2.AuxInt = 8 22563 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22564 v3.AuxInt = i0 22565 v3.Aux = s 22566 v3.AddArg(p) 22567 v3.AddArg(idx) 22568 v3.AddArg(mem) 22569 v2.AddArg(v3) 22570 v1.AddArg(v2) 22571 v0.AddArg(v1) 22572 v0.AddArg(y) 22573 return true 22574 } 22575 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 22576 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22577 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22578 for { 22579 _ = v.Args[1] 22580 s0 := v.Args[0] 22581 if s0.Op != OpAMD64SHLLconst { 22582 break 22583 } 22584 j0 := s0.AuxInt 22585 x0 := s0.Args[0] 22586 if x0.Op != OpAMD64MOVBloadidx1 { 22587 break 22588 } 22589 i0 := x0.AuxInt 22590 s := x0.Aux 22591 _ = x0.Args[2] 22592 idx := x0.Args[0] 22593 p := x0.Args[1] 22594 mem := x0.Args[2] 22595 or := v.Args[1] 22596 if or.Op != OpAMD64ORL { 22597 break 22598 } 22599 _ = or.Args[1] 22600 y := or.Args[0] 22601 s1 := or.Args[1] 22602 if s1.Op != OpAMD64SHLLconst { 22603 break 22604 } 22605 j1 := s1.AuxInt 22606 x1 := s1.Args[0] 22607 if x1.Op != OpAMD64MOVBloadidx1 { 22608 break 22609 } 22610 i1 := x1.AuxInt 22611 if x1.Aux != s { 22612 break 22613 } 22614 _ = x1.Args[2] 22615 if p != x1.Args[0] { 22616 break 22617 } 22618 if idx != x1.Args[1] { 22619 break 22620 } 22621 if mem != x1.Args[2] { 22622 break 22623 } 22624 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22625 break 22626 } 22627 b = mergePoint(b, x0, x1) 22628 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22629 v.reset(OpCopy) 22630 v.AddArg(v0) 22631 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22632 v1.AuxInt = j1 22633 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22634 v2.AuxInt = 8 22635 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22636 v3.AuxInt = i0 22637 v3.Aux = s 22638 v3.AddArg(p) 22639 v3.AddArg(idx) 22640 v3.AddArg(mem) 22641 v2.AddArg(v3) 22642 v1.AddArg(v2) 22643 v0.AddArg(v1) 22644 v0.AddArg(y) 22645 return true 22646 } 22647 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 22648 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22649 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22650 for { 22651 _ = v.Args[1] 22652 s0 := v.Args[0] 22653 if s0.Op != OpAMD64SHLLconst { 22654 
break 22655 } 22656 j0 := s0.AuxInt 22657 x0 := s0.Args[0] 22658 if x0.Op != OpAMD64MOVBloadidx1 { 22659 break 22660 } 22661 i0 := x0.AuxInt 22662 s := x0.Aux 22663 _ = x0.Args[2] 22664 p := x0.Args[0] 22665 idx := x0.Args[1] 22666 mem := x0.Args[2] 22667 or := v.Args[1] 22668 if or.Op != OpAMD64ORL { 22669 break 22670 } 22671 _ = or.Args[1] 22672 y := or.Args[0] 22673 s1 := or.Args[1] 22674 if s1.Op != OpAMD64SHLLconst { 22675 break 22676 } 22677 j1 := s1.AuxInt 22678 x1 := s1.Args[0] 22679 if x1.Op != OpAMD64MOVBloadidx1 { 22680 break 22681 } 22682 i1 := x1.AuxInt 22683 if x1.Aux != s { 22684 break 22685 } 22686 _ = x1.Args[2] 22687 if idx != x1.Args[0] { 22688 break 22689 } 22690 if p != x1.Args[1] { 22691 break 22692 } 22693 if mem != x1.Args[2] { 22694 break 22695 } 22696 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22697 break 22698 } 22699 b = mergePoint(b, x0, x1) 22700 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22701 v.reset(OpCopy) 22702 v.AddArg(v0) 22703 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22704 v1.AuxInt = j1 22705 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22706 v2.AuxInt = 8 22707 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22708 v3.AuxInt = i0 22709 v3.Aux = s 22710 v3.AddArg(p) 22711 v3.AddArg(idx) 22712 v3.AddArg(mem) 22713 v2.AddArg(v3) 22714 v1.AddArg(v2) 22715 v0.AddArg(v1) 22716 v0.AddArg(y) 22717 return true 22718 } 22719 return false 22720 } 22721 func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool { 22722 b := v.Block 22723 _ = b 22724 typ := &b.Func.Config.Types 22725 _ = typ 22726 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 22727 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22728 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22729 for { 22730 _ = v.Args[1] 22731 s0 := v.Args[0] 22732 if s0.Op != OpAMD64SHLLconst { 22733 break 22734 } 22735 j0 := s0.AuxInt 22736 x0 := s0.Args[0] 22737 if x0.Op != OpAMD64MOVBloadidx1 { 22738 break 22739 } 22740 i0 := x0.AuxInt 22741 s := x0.Aux 22742 _ = x0.Args[2] 22743 idx := x0.Args[0] 22744 p := x0.Args[1] 22745 mem := x0.Args[2] 22746 or := v.Args[1] 22747 if or.Op != OpAMD64ORL { 22748 break 22749 } 22750 _ = or.Args[1] 22751 y := or.Args[0] 22752 s1 := or.Args[1] 22753 if s1.Op != OpAMD64SHLLconst { 22754 break 22755 } 22756 j1 := s1.AuxInt 22757 x1 := s1.Args[0] 22758 if x1.Op != OpAMD64MOVBloadidx1 { 22759 break 22760 } 22761 i1 := x1.AuxInt 22762 if x1.Aux != s { 22763 break 22764 } 22765 _ = x1.Args[2] 22766 if idx != x1.Args[0] { 22767 break 22768 } 22769 if p != x1.Args[1] { 22770 break 22771 } 22772 if mem != x1.Args[2] { 22773 break 22774 } 22775 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22776 break 22777 } 22778 b = mergePoint(b, x0, x1) 22779 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22780 v.reset(OpCopy) 22781 v.AddArg(v0) 
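// The ORL cases running from 22215 through this stretch handle the 8-bit
// step of the same merge: two single-byte loads at offsets i0 and i1 == i0+1
// whose shift amounts satisfy j1 == j0-8 and j1%16 == 0 are replaced by one
// 16-bit load, byte-swapped with ROLWconst [8] so the significance the
// shifts encoded is preserved, then shifted back into place with
// SHLLconst [j1] and OR'd with the remaining term y (the intermediate form
// of the be32 sketch above). The permutations again cover ORL's
// commutativity, both operand orders of the inner ORL, and the
// interchangeable ptr/idx arguments of MOVBloadidx1.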
22782 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22783 v1.AuxInt = j1 22784 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22785 v2.AuxInt = 8 22786 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22787 v3.AuxInt = i0 22788 v3.Aux = s 22789 v3.AddArg(p) 22790 v3.AddArg(idx) 22791 v3.AddArg(mem) 22792 v2.AddArg(v3) 22793 v1.AddArg(v2) 22794 v0.AddArg(v1) 22795 v0.AddArg(y) 22796 return true 22797 } 22798 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22799 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22800 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22801 for { 22802 _ = v.Args[1] 22803 or := v.Args[0] 22804 if or.Op != OpAMD64ORL { 22805 break 22806 } 22807 _ = or.Args[1] 22808 s1 := or.Args[0] 22809 if s1.Op != OpAMD64SHLLconst { 22810 break 22811 } 22812 j1 := s1.AuxInt 22813 x1 := s1.Args[0] 22814 if x1.Op != OpAMD64MOVBloadidx1 { 22815 break 22816 } 22817 i1 := x1.AuxInt 22818 s := x1.Aux 22819 _ = x1.Args[2] 22820 p := x1.Args[0] 22821 idx := x1.Args[1] 22822 mem := x1.Args[2] 22823 y := or.Args[1] 22824 s0 := v.Args[1] 22825 if s0.Op != OpAMD64SHLLconst { 22826 break 22827 } 22828 j0 := s0.AuxInt 22829 x0 := s0.Args[0] 22830 if x0.Op != OpAMD64MOVBloadidx1 { 22831 break 22832 } 22833 i0 := x0.AuxInt 22834 if x0.Aux != s { 22835 break 22836 } 22837 _ = x0.Args[2] 22838 if p != x0.Args[0] { 22839 break 22840 } 22841 if idx != x0.Args[1] { 22842 break 22843 } 22844 if mem != x0.Args[2] { 22845 break 22846 } 22847 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22848 break 22849 } 22850 b = mergePoint(b, x0, x1) 22851 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22852 v.reset(OpCopy) 22853 v.AddArg(v0) 22854 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22855 v1.AuxInt = j1 22856 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22857 v2.AuxInt = 8 22858 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22859 v3.AuxInt = i0 22860 v3.Aux = s 22861 v3.AddArg(p) 22862 v3.AddArg(idx) 22863 v3.AddArg(mem) 22864 v2.AddArg(v3) 22865 v1.AddArg(v2) 22866 v0.AddArg(v1) 22867 v0.AddArg(y) 22868 return true 22869 } 22870 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22871 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22872 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22873 for { 22874 _ = v.Args[1] 22875 or := v.Args[0] 22876 if or.Op != OpAMD64ORL { 22877 break 22878 } 22879 _ = or.Args[1] 22880 s1 := or.Args[0] 22881 if s1.Op != OpAMD64SHLLconst { 22882 break 22883 } 22884 j1 := s1.AuxInt 22885 x1 := s1.Args[0] 22886 if x1.Op != OpAMD64MOVBloadidx1 { 22887 break 22888 } 22889 i1 := x1.AuxInt 22890 s := x1.Aux 22891 _ = x1.Args[2] 
22892 idx := x1.Args[0] 22893 p := x1.Args[1] 22894 mem := x1.Args[2] 22895 y := or.Args[1] 22896 s0 := v.Args[1] 22897 if s0.Op != OpAMD64SHLLconst { 22898 break 22899 } 22900 j0 := s0.AuxInt 22901 x0 := s0.Args[0] 22902 if x0.Op != OpAMD64MOVBloadidx1 { 22903 break 22904 } 22905 i0 := x0.AuxInt 22906 if x0.Aux != s { 22907 break 22908 } 22909 _ = x0.Args[2] 22910 if p != x0.Args[0] { 22911 break 22912 } 22913 if idx != x0.Args[1] { 22914 break 22915 } 22916 if mem != x0.Args[2] { 22917 break 22918 } 22919 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22920 break 22921 } 22922 b = mergePoint(b, x0, x1) 22923 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22924 v.reset(OpCopy) 22925 v.AddArg(v0) 22926 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22927 v1.AuxInt = j1 22928 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22929 v2.AuxInt = 8 22930 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22931 v3.AuxInt = i0 22932 v3.Aux = s 22933 v3.AddArg(p) 22934 v3.AddArg(idx) 22935 v3.AddArg(mem) 22936 v2.AddArg(v3) 22937 v1.AddArg(v2) 22938 v0.AddArg(v1) 22939 v0.AddArg(y) 22940 return true 22941 } 22942 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22943 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22944 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22945 for { 22946 _ = v.Args[1] 22947 or := v.Args[0] 22948 if or.Op != OpAMD64ORL { 22949 break 22950 } 22951 _ = or.Args[1] 22952 y := or.Args[0] 22953 s1 := or.Args[1] 22954 if s1.Op != OpAMD64SHLLconst { 22955 break 22956 } 22957 j1 := s1.AuxInt 22958 x1 := s1.Args[0] 22959 if x1.Op != OpAMD64MOVBloadidx1 { 22960 break 22961 } 22962 i1 := x1.AuxInt 22963 s := x1.Aux 22964 _ = x1.Args[2] 22965 p := x1.Args[0] 22966 idx := x1.Args[1] 22967 mem := x1.Args[2] 22968 s0 := v.Args[1] 22969 if s0.Op != OpAMD64SHLLconst { 22970 break 22971 } 22972 j0 := s0.AuxInt 22973 x0 := s0.Args[0] 22974 if x0.Op != OpAMD64MOVBloadidx1 { 22975 break 22976 } 22977 i0 := x0.AuxInt 22978 if x0.Aux != s { 22979 break 22980 } 22981 _ = x0.Args[2] 22982 if p != x0.Args[0] { 22983 break 22984 } 22985 if idx != x0.Args[1] { 22986 break 22987 } 22988 if mem != x0.Args[2] { 22989 break 22990 } 22991 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22992 break 22993 } 22994 b = mergePoint(b, x0, x1) 22995 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22996 v.reset(OpCopy) 22997 v.AddArg(v0) 22998 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22999 v1.AuxInt = j1 23000 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23001 v2.AuxInt = 8 23002 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23003 v3.AuxInt = i0 23004 v3.Aux = s 23005 v3.AddArg(p) 23006 v3.AddArg(idx) 23007 v3.AddArg(mem) 23008 v2.AddArg(v3) 23009 v1.AddArg(v2) 23010 v0.AddArg(v1) 23011 v0.AddArg(y) 23012 return true 23013 } 23014 // 
match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 23015 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23016 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23017 for { 23018 _ = v.Args[1] 23019 or := v.Args[0] 23020 if or.Op != OpAMD64ORL { 23021 break 23022 } 23023 _ = or.Args[1] 23024 y := or.Args[0] 23025 s1 := or.Args[1] 23026 if s1.Op != OpAMD64SHLLconst { 23027 break 23028 } 23029 j1 := s1.AuxInt 23030 x1 := s1.Args[0] 23031 if x1.Op != OpAMD64MOVBloadidx1 { 23032 break 23033 } 23034 i1 := x1.AuxInt 23035 s := x1.Aux 23036 _ = x1.Args[2] 23037 idx := x1.Args[0] 23038 p := x1.Args[1] 23039 mem := x1.Args[2] 23040 s0 := v.Args[1] 23041 if s0.Op != OpAMD64SHLLconst { 23042 break 23043 } 23044 j0 := s0.AuxInt 23045 x0 := s0.Args[0] 23046 if x0.Op != OpAMD64MOVBloadidx1 { 23047 break 23048 } 23049 i0 := x0.AuxInt 23050 if x0.Aux != s { 23051 break 23052 } 23053 _ = x0.Args[2] 23054 if p != x0.Args[0] { 23055 break 23056 } 23057 if idx != x0.Args[1] { 23058 break 23059 } 23060 if mem != x0.Args[2] { 23061 break 23062 } 23063 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23064 break 23065 } 23066 b = mergePoint(b, x0, x1) 23067 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23068 v.reset(OpCopy) 23069 v.AddArg(v0) 23070 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23071 v1.AuxInt = j1 23072 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23073 v2.AuxInt = 8 23074 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23075 v3.AuxInt = i0 23076 v3.Aux = s 23077 v3.AddArg(p) 23078 v3.AddArg(idx) 23079 v3.AddArg(mem) 23080 v2.AddArg(v3) 23081 v1.AddArg(v2) 23082 v0.AddArg(v1) 23083 v0.AddArg(y) 23084 return true 23085 } 23086 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23087 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23088 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23089 for { 23090 _ = v.Args[1] 23091 or := v.Args[0] 23092 if or.Op != OpAMD64ORL { 23093 break 23094 } 23095 _ = or.Args[1] 23096 s1 := or.Args[0] 23097 if s1.Op != OpAMD64SHLLconst { 23098 break 23099 } 23100 j1 := s1.AuxInt 23101 x1 := s1.Args[0] 23102 if x1.Op != OpAMD64MOVBloadidx1 { 23103 break 23104 } 23105 i1 := x1.AuxInt 23106 s := x1.Aux 23107 _ = x1.Args[2] 23108 p := x1.Args[0] 23109 idx := x1.Args[1] 23110 mem := x1.Args[2] 23111 y := or.Args[1] 23112 s0 := v.Args[1] 23113 if s0.Op != OpAMD64SHLLconst { 23114 break 23115 } 23116 j0 := s0.AuxInt 23117 x0 := s0.Args[0] 23118 if x0.Op != OpAMD64MOVBloadidx1 { 23119 break 23120 } 23121 i0 := x0.AuxInt 23122 if x0.Aux != s { 23123 break 23124 } 23125 _ = x0.Args[2] 23126 if idx != x0.Args[0] { 23127 break 23128 } 23129 if p != x0.Args[1] { 
23130 break 23131 } 23132 if mem != x0.Args[2] { 23133 break 23134 } 23135 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23136 break 23137 } 23138 b = mergePoint(b, x0, x1) 23139 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23140 v.reset(OpCopy) 23141 v.AddArg(v0) 23142 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23143 v1.AuxInt = j1 23144 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23145 v2.AuxInt = 8 23146 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23147 v3.AuxInt = i0 23148 v3.Aux = s 23149 v3.AddArg(p) 23150 v3.AddArg(idx) 23151 v3.AddArg(mem) 23152 v2.AddArg(v3) 23153 v1.AddArg(v2) 23154 v0.AddArg(v1) 23155 v0.AddArg(y) 23156 return true 23157 } 23158 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23159 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23160 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23161 for { 23162 _ = v.Args[1] 23163 or := v.Args[0] 23164 if or.Op != OpAMD64ORL { 23165 break 23166 } 23167 _ = or.Args[1] 23168 s1 := or.Args[0] 23169 if s1.Op != OpAMD64SHLLconst { 23170 break 23171 } 23172 j1 := s1.AuxInt 23173 x1 := s1.Args[0] 23174 if x1.Op != OpAMD64MOVBloadidx1 { 23175 break 23176 } 23177 i1 := x1.AuxInt 23178 s := x1.Aux 23179 _ = x1.Args[2] 23180 idx := x1.Args[0] 23181 p := x1.Args[1] 23182 mem := x1.Args[2] 23183 y := or.Args[1] 23184 s0 := v.Args[1] 23185 if s0.Op != OpAMD64SHLLconst { 23186 break 23187 } 23188 j0 := s0.AuxInt 23189 x0 := s0.Args[0] 23190 if x0.Op != OpAMD64MOVBloadidx1 { 23191 break 23192 } 23193 i0 := x0.AuxInt 23194 if x0.Aux != s { 23195 break 23196 } 23197 _ = x0.Args[2] 23198 if idx != x0.Args[0] { 23199 break 23200 } 23201 if p != x0.Args[1] { 23202 break 23203 } 23204 if mem != x0.Args[2] { 23205 break 23206 } 23207 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23208 break 23209 } 23210 b = mergePoint(b, x0, x1) 23211 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23212 v.reset(OpCopy) 23213 v.AddArg(v0) 23214 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23215 v1.AuxInt = j1 23216 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23217 v2.AuxInt = 8 23218 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23219 v3.AuxInt = i0 23220 v3.Aux = s 23221 v3.AddArg(p) 23222 v3.AddArg(idx) 23223 v3.AddArg(mem) 23224 v2.AddArg(v3) 23225 v1.AddArg(v2) 23226 v0.AddArg(v1) 23227 v0.AddArg(y) 23228 return true 23229 } 23230 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23231 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23232 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst 
<v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23233 for { 23234 _ = v.Args[1] 23235 or := v.Args[0] 23236 if or.Op != OpAMD64ORL { 23237 break 23238 } 23239 _ = or.Args[1] 23240 y := or.Args[0] 23241 s1 := or.Args[1] 23242 if s1.Op != OpAMD64SHLLconst { 23243 break 23244 } 23245 j1 := s1.AuxInt 23246 x1 := s1.Args[0] 23247 if x1.Op != OpAMD64MOVBloadidx1 { 23248 break 23249 } 23250 i1 := x1.AuxInt 23251 s := x1.Aux 23252 _ = x1.Args[2] 23253 p := x1.Args[0] 23254 idx := x1.Args[1] 23255 mem := x1.Args[2] 23256 s0 := v.Args[1] 23257 if s0.Op != OpAMD64SHLLconst { 23258 break 23259 } 23260 j0 := s0.AuxInt 23261 x0 := s0.Args[0] 23262 if x0.Op != OpAMD64MOVBloadidx1 { 23263 break 23264 } 23265 i0 := x0.AuxInt 23266 if x0.Aux != s { 23267 break 23268 } 23269 _ = x0.Args[2] 23270 if idx != x0.Args[0] { 23271 break 23272 } 23273 if p != x0.Args[1] { 23274 break 23275 } 23276 if mem != x0.Args[2] { 23277 break 23278 } 23279 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23280 break 23281 } 23282 b = mergePoint(b, x0, x1) 23283 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23284 v.reset(OpCopy) 23285 v.AddArg(v0) 23286 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23287 v1.AuxInt = j1 23288 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23289 v2.AuxInt = 8 23290 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23291 v3.AuxInt = i0 23292 v3.Aux = s 23293 v3.AddArg(p) 23294 v3.AddArg(idx) 23295 v3.AddArg(mem) 23296 v2.AddArg(v3) 23297 v1.AddArg(v2) 23298 v0.AddArg(v1) 23299 v0.AddArg(y) 23300 return true 23301 } 23302 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23303 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23304 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23305 for { 23306 _ = v.Args[1] 23307 or := v.Args[0] 23308 if or.Op != OpAMD64ORL { 23309 break 23310 } 23311 _ = or.Args[1] 23312 y := or.Args[0] 23313 s1 := or.Args[1] 23314 if s1.Op != OpAMD64SHLLconst { 23315 break 23316 } 23317 j1 := s1.AuxInt 23318 x1 := s1.Args[0] 23319 if x1.Op != OpAMD64MOVBloadidx1 { 23320 break 23321 } 23322 i1 := x1.AuxInt 23323 s := x1.Aux 23324 _ = x1.Args[2] 23325 idx := x1.Args[0] 23326 p := x1.Args[1] 23327 mem := x1.Args[2] 23328 s0 := v.Args[1] 23329 if s0.Op != OpAMD64SHLLconst { 23330 break 23331 } 23332 j0 := s0.AuxInt 23333 x0 := s0.Args[0] 23334 if x0.Op != OpAMD64MOVBloadidx1 { 23335 break 23336 } 23337 i0 := x0.AuxInt 23338 if x0.Aux != s { 23339 break 23340 } 23341 _ = x0.Args[2] 23342 if idx != x0.Args[0] { 23343 break 23344 } 23345 if p != x0.Args[1] { 23346 break 23347 } 23348 if mem != x0.Args[2] { 23349 break 23350 } 23351 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23352 break 23353 } 23354 b = mergePoint(b, x0, x1) 23355 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23356 v.reset(OpCopy) 23357 
v.AddArg(v0) 23358 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23359 v1.AuxInt = j1 23360 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23361 v2.AuxInt = 8 23362 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23363 v3.AuxInt = i0 23364 v3.Aux = s 23365 v3.AddArg(p) 23366 v3.AddArg(idx) 23367 v3.AddArg(mem) 23368 v2.AddArg(v3) 23369 v1.AddArg(v2) 23370 v0.AddArg(v1) 23371 v0.AddArg(y) 23372 return true 23373 } 23374 // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) 23375 // cond: canMergeLoad(v, l, x) && clobber(l) 23376 // result: (ORLmem x [off] {sym} ptr mem) 23377 for { 23378 _ = v.Args[1] 23379 x := v.Args[0] 23380 l := v.Args[1] 23381 if l.Op != OpAMD64MOVLload { 23382 break 23383 } 23384 off := l.AuxInt 23385 sym := l.Aux 23386 _ = l.Args[1] 23387 ptr := l.Args[0] 23388 mem := l.Args[1] 23389 if !(canMergeLoad(v, l, x) && clobber(l)) { 23390 break 23391 } 23392 v.reset(OpAMD64ORLmem) 23393 v.AuxInt = off 23394 v.Aux = sym 23395 v.AddArg(x) 23396 v.AddArg(ptr) 23397 v.AddArg(mem) 23398 return true 23399 } 23400 return false 23401 } 23402 func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { 23403 // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) 23404 // cond: canMergeLoad(v, l, x) && clobber(l) 23405 // result: (ORLmem x [off] {sym} ptr mem) 23406 for { 23407 _ = v.Args[1] 23408 l := v.Args[0] 23409 if l.Op != OpAMD64MOVLload { 23410 break 23411 } 23412 off := l.AuxInt 23413 sym := l.Aux 23414 _ = l.Args[1] 23415 ptr := l.Args[0] 23416 mem := l.Args[1] 23417 x := v.Args[1] 23418 if !(canMergeLoad(v, l, x) && clobber(l)) { 23419 break 23420 } 23421 v.reset(OpAMD64ORLmem) 23422 v.AuxInt = off 23423 v.Aux = sym 23424 v.AddArg(x) 23425 v.AddArg(ptr) 23426 v.AddArg(mem) 23427 return true 23428 } 23429 return false 23430 } 23431 func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { 23432 // match: (ORLconst [c] x) 23433 // cond: int32(c)==0 23434 // result: x 23435 for { 23436 c := v.AuxInt 23437 x := v.Args[0] 23438 if !(int32(c) == 0) { 23439 break 23440 } 23441 v.reset(OpCopy) 23442 v.Type = x.Type 23443 v.AddArg(x) 23444 return true 23445 } 23446 // match: (ORLconst [c] _) 23447 // cond: int32(c)==-1 23448 // result: (MOVLconst [-1]) 23449 for { 23450 c := v.AuxInt 23451 if !(int32(c) == -1) { 23452 break 23453 } 23454 v.reset(OpAMD64MOVLconst) 23455 v.AuxInt = -1 23456 return true 23457 } 23458 // match: (ORLconst [c] (MOVLconst [d])) 23459 // cond: 23460 // result: (MOVLconst [c|d]) 23461 for { 23462 c := v.AuxInt 23463 v_0 := v.Args[0] 23464 if v_0.Op != OpAMD64MOVLconst { 23465 break 23466 } 23467 d := v_0.AuxInt 23468 v.reset(OpAMD64MOVLconst) 23469 v.AuxInt = c | d 23470 return true 23471 } 23472 return false 23473 } 23474 func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool { 23475 b := v.Block 23476 _ = b 23477 typ := &b.Func.Config.Types 23478 _ = typ 23479 // match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 23480 // cond: 23481 // result: ( ORL x (MOVLf2i y)) 23482 for { 23483 off := v.AuxInt 23484 sym := v.Aux 23485 _ = v.Args[2] 23486 x := v.Args[0] 23487 ptr := v.Args[1] 23488 v_2 := v.Args[2] 23489 if v_2.Op != OpAMD64MOVSSstore { 23490 break 23491 } 23492 if v_2.AuxInt != off { 23493 break 23494 } 23495 if v_2.Aux != sym { 23496 break 23497 } 23498 _ = v_2.Args[2] 23499 if ptr != v_2.Args[0] { 23500 break 23501 } 23502 y := v_2.Args[1] 23503 v.reset(OpAMD64ORL) 23504 v.AddArg(x) 23505 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 23506 v0.AddArg(y) 23507 v.AddArg(v0) 23508 return true 23509 } 23510 return false 
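// Recapping the tail of the 32-bit OR rules: ORL with a single-use MOVLload
// on either side fuses into ORLmem when canMergeLoad(v, l, x) permits;
// ORLconst reduces to x when int32(c) == 0, to (MOVLconst [-1]) when
// int32(c) == -1, and otherwise folds against MOVLconst to c|d; and an
// ORLmem that reads back a MOVSSstore at the same off/sym/ptr skips the
// trip through memory, using MOVLf2i to move the just-stored float bits
// into an integer register. The ORLconst identities are plain bit
// arithmetic (a sketch of the semantics only, not of the SSA form):
//
//	var x uint32 = 0x1234
//	_ = x | 0          // x|0 == x
//	_ = x | 0xffffffff // x|^0 == ^0, i.e. int32 -1
//	_ = uint32(5) | 3  // constants fold to 7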
23511 } 23512 func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { 23513 // match: (ORQ x (MOVQconst [c])) 23514 // cond: is32Bit(c) 23515 // result: (ORQconst [c] x) 23516 for { 23517 _ = v.Args[1] 23518 x := v.Args[0] 23519 v_1 := v.Args[1] 23520 if v_1.Op != OpAMD64MOVQconst { 23521 break 23522 } 23523 c := v_1.AuxInt 23524 if !(is32Bit(c)) { 23525 break 23526 } 23527 v.reset(OpAMD64ORQconst) 23528 v.AuxInt = c 23529 v.AddArg(x) 23530 return true 23531 } 23532 // match: (ORQ (MOVQconst [c]) x) 23533 // cond: is32Bit(c) 23534 // result: (ORQconst [c] x) 23535 for { 23536 _ = v.Args[1] 23537 v_0 := v.Args[0] 23538 if v_0.Op != OpAMD64MOVQconst { 23539 break 23540 } 23541 c := v_0.AuxInt 23542 x := v.Args[1] 23543 if !(is32Bit(c)) { 23544 break 23545 } 23546 v.reset(OpAMD64ORQconst) 23547 v.AuxInt = c 23548 v.AddArg(x) 23549 return true 23550 } 23551 // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) 23552 // cond: d==64-c 23553 // result: (ROLQconst x [c]) 23554 for { 23555 _ = v.Args[1] 23556 v_0 := v.Args[0] 23557 if v_0.Op != OpAMD64SHLQconst { 23558 break 23559 } 23560 c := v_0.AuxInt 23561 x := v_0.Args[0] 23562 v_1 := v.Args[1] 23563 if v_1.Op != OpAMD64SHRQconst { 23564 break 23565 } 23566 d := v_1.AuxInt 23567 if x != v_1.Args[0] { 23568 break 23569 } 23570 if !(d == 64-c) { 23571 break 23572 } 23573 v.reset(OpAMD64ROLQconst) 23574 v.AuxInt = c 23575 v.AddArg(x) 23576 return true 23577 } 23578 // match: (ORQ (SHRQconst x [d]) (SHLQconst x [c])) 23579 // cond: d==64-c 23580 // result: (ROLQconst x [c]) 23581 for { 23582 _ = v.Args[1] 23583 v_0 := v.Args[0] 23584 if v_0.Op != OpAMD64SHRQconst { 23585 break 23586 } 23587 d := v_0.AuxInt 23588 x := v_0.Args[0] 23589 v_1 := v.Args[1] 23590 if v_1.Op != OpAMD64SHLQconst { 23591 break 23592 } 23593 c := v_1.AuxInt 23594 if x != v_1.Args[0] { 23595 break 23596 } 23597 if !(d == 64-c) { 23598 break 23599 } 23600 v.reset(OpAMD64ROLQconst) 23601 v.AuxInt = c 23602 v.AddArg(x) 23603 return true 23604 } 23605 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) 23606 // cond: 23607 // result: (ROLQ x y) 23608 for { 23609 _ = v.Args[1] 23610 v_0 := v.Args[0] 23611 if v_0.Op != OpAMD64SHLQ { 23612 break 23613 } 23614 _ = v_0.Args[1] 23615 x := v_0.Args[0] 23616 y := v_0.Args[1] 23617 v_1 := v.Args[1] 23618 if v_1.Op != OpAMD64ANDQ { 23619 break 23620 } 23621 _ = v_1.Args[1] 23622 v_1_0 := v_1.Args[0] 23623 if v_1_0.Op != OpAMD64SHRQ { 23624 break 23625 } 23626 _ = v_1_0.Args[1] 23627 if x != v_1_0.Args[0] { 23628 break 23629 } 23630 v_1_0_1 := v_1_0.Args[1] 23631 if v_1_0_1.Op != OpAMD64NEGQ { 23632 break 23633 } 23634 if y != v_1_0_1.Args[0] { 23635 break 23636 } 23637 v_1_1 := v_1.Args[1] 23638 if v_1_1.Op != OpAMD64SBBQcarrymask { 23639 break 23640 } 23641 v_1_1_0 := v_1_1.Args[0] 23642 if v_1_1_0.Op != OpAMD64CMPQconst { 23643 break 23644 } 23645 if v_1_1_0.AuxInt != 64 { 23646 break 23647 } 23648 v_1_1_0_0 := v_1_1_0.Args[0] 23649 if v_1_1_0_0.Op != OpAMD64NEGQ { 23650 break 23651 } 23652 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 23653 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 23654 break 23655 } 23656 if v_1_1_0_0_0.AuxInt != -64 { 23657 break 23658 } 23659 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 23660 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 23661 break 23662 } 23663 if v_1_1_0_0_0_0.AuxInt != 63 { 23664 break 23665 } 23666 if y != v_1_1_0_0_0_0.Args[0] { 23667 break 23668 } 23669 v.reset(OpAMD64ROLQ) 23670 v.AddArg(x) 23671 v.AddArg(y) 23672 return true 23673 } 23674 // 
match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y)))) 23675 // cond: 23676 // result: (ROLQ x y) 23677 for { 23678 _ = v.Args[1] 23679 v_0 := v.Args[0] 23680 if v_0.Op != OpAMD64SHLQ { 23681 break 23682 } 23683 _ = v_0.Args[1] 23684 x := v_0.Args[0] 23685 y := v_0.Args[1] 23686 v_1 := v.Args[1] 23687 if v_1.Op != OpAMD64ANDQ { 23688 break 23689 } 23690 _ = v_1.Args[1] 23691 v_1_0 := v_1.Args[0] 23692 if v_1_0.Op != OpAMD64SBBQcarrymask { 23693 break 23694 } 23695 v_1_0_0 := v_1_0.Args[0] 23696 if v_1_0_0.Op != OpAMD64CMPQconst { 23697 break 23698 } 23699 if v_1_0_0.AuxInt != 64 { 23700 break 23701 } 23702 v_1_0_0_0 := v_1_0_0.Args[0] 23703 if v_1_0_0_0.Op != OpAMD64NEGQ { 23704 break 23705 } 23706 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 23707 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 23708 break 23709 } 23710 if v_1_0_0_0_0.AuxInt != -64 { 23711 break 23712 } 23713 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 23714 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 23715 break 23716 } 23717 if v_1_0_0_0_0_0.AuxInt != 63 { 23718 break 23719 } 23720 if y != v_1_0_0_0_0_0.Args[0] { 23721 break 23722 } 23723 v_1_1 := v_1.Args[1] 23724 if v_1_1.Op != OpAMD64SHRQ { 23725 break 23726 } 23727 _ = v_1_1.Args[1] 23728 if x != v_1_1.Args[0] { 23729 break 23730 } 23731 v_1_1_1 := v_1_1.Args[1] 23732 if v_1_1_1.Op != OpAMD64NEGQ { 23733 break 23734 } 23735 if y != v_1_1_1.Args[0] { 23736 break 23737 } 23738 v.reset(OpAMD64ROLQ) 23739 v.AddArg(x) 23740 v.AddArg(y) 23741 return true 23742 } 23743 // match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y)) 23744 // cond: 23745 // result: (ROLQ x y) 23746 for { 23747 _ = v.Args[1] 23748 v_0 := v.Args[0] 23749 if v_0.Op != OpAMD64ANDQ { 23750 break 23751 } 23752 _ = v_0.Args[1] 23753 v_0_0 := v_0.Args[0] 23754 if v_0_0.Op != OpAMD64SHRQ { 23755 break 23756 } 23757 _ = v_0_0.Args[1] 23758 x := v_0_0.Args[0] 23759 v_0_0_1 := v_0_0.Args[1] 23760 if v_0_0_1.Op != OpAMD64NEGQ { 23761 break 23762 } 23763 y := v_0_0_1.Args[0] 23764 v_0_1 := v_0.Args[1] 23765 if v_0_1.Op != OpAMD64SBBQcarrymask { 23766 break 23767 } 23768 v_0_1_0 := v_0_1.Args[0] 23769 if v_0_1_0.Op != OpAMD64CMPQconst { 23770 break 23771 } 23772 if v_0_1_0.AuxInt != 64 { 23773 break 23774 } 23775 v_0_1_0_0 := v_0_1_0.Args[0] 23776 if v_0_1_0_0.Op != OpAMD64NEGQ { 23777 break 23778 } 23779 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 23780 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 23781 break 23782 } 23783 if v_0_1_0_0_0.AuxInt != -64 { 23784 break 23785 } 23786 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 23787 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 23788 break 23789 } 23790 if v_0_1_0_0_0_0.AuxInt != 63 { 23791 break 23792 } 23793 if y != v_0_1_0_0_0_0.Args[0] { 23794 break 23795 } 23796 v_1 := v.Args[1] 23797 if v_1.Op != OpAMD64SHLQ { 23798 break 23799 } 23800 _ = v_1.Args[1] 23801 if x != v_1.Args[0] { 23802 break 23803 } 23804 if y != v_1.Args[1] { 23805 break 23806 } 23807 v.reset(OpAMD64ROLQ) 23808 v.AddArg(x) 23809 v.AddArg(y) 23810 return true 23811 } 23812 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y)) 23813 // cond: 23814 // result: (ROLQ x y) 23815 for { 23816 _ = v.Args[1] 23817 v_0 := v.Args[0] 23818 if v_0.Op != OpAMD64ANDQ { 23819 break 23820 } 23821 _ = v_0.Args[1] 23822 v_0_0 := v_0.Args[0] 23823 if v_0_0.Op != OpAMD64SBBQcarrymask { 23824 break 23825 } 23826 v_0_0_0 := v_0_0.Args[0] 23827 if v_0_0_0.Op 
!= OpAMD64CMPQconst { 23828 break 23829 } 23830 if v_0_0_0.AuxInt != 64 { 23831 break 23832 } 23833 v_0_0_0_0 := v_0_0_0.Args[0] 23834 if v_0_0_0_0.Op != OpAMD64NEGQ { 23835 break 23836 } 23837 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 23838 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 23839 break 23840 } 23841 if v_0_0_0_0_0.AuxInt != -64 { 23842 break 23843 } 23844 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 23845 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 23846 break 23847 } 23848 if v_0_0_0_0_0_0.AuxInt != 63 { 23849 break 23850 } 23851 y := v_0_0_0_0_0_0.Args[0] 23852 v_0_1 := v_0.Args[1] 23853 if v_0_1.Op != OpAMD64SHRQ { 23854 break 23855 } 23856 _ = v_0_1.Args[1] 23857 x := v_0_1.Args[0] 23858 v_0_1_1 := v_0_1.Args[1] 23859 if v_0_1_1.Op != OpAMD64NEGQ { 23860 break 23861 } 23862 if y != v_0_1_1.Args[0] { 23863 break 23864 } 23865 v_1 := v.Args[1] 23866 if v_1.Op != OpAMD64SHLQ { 23867 break 23868 } 23869 _ = v_1.Args[1] 23870 if x != v_1.Args[0] { 23871 break 23872 } 23873 if y != v_1.Args[1] { 23874 break 23875 } 23876 v.reset(OpAMD64ROLQ) 23877 v.AddArg(x) 23878 v.AddArg(y) 23879 return true 23880 } 23881 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 23882 // cond: 23883 // result: (ROLQ x y) 23884 for { 23885 _ = v.Args[1] 23886 v_0 := v.Args[0] 23887 if v_0.Op != OpAMD64SHLQ { 23888 break 23889 } 23890 _ = v_0.Args[1] 23891 x := v_0.Args[0] 23892 y := v_0.Args[1] 23893 v_1 := v.Args[1] 23894 if v_1.Op != OpAMD64ANDQ { 23895 break 23896 } 23897 _ = v_1.Args[1] 23898 v_1_0 := v_1.Args[0] 23899 if v_1_0.Op != OpAMD64SHRQ { 23900 break 23901 } 23902 _ = v_1_0.Args[1] 23903 if x != v_1_0.Args[0] { 23904 break 23905 } 23906 v_1_0_1 := v_1_0.Args[1] 23907 if v_1_0_1.Op != OpAMD64NEGL { 23908 break 23909 } 23910 if y != v_1_0_1.Args[0] { 23911 break 23912 } 23913 v_1_1 := v_1.Args[1] 23914 if v_1_1.Op != OpAMD64SBBQcarrymask { 23915 break 23916 } 23917 v_1_1_0 := v_1_1.Args[0] 23918 if v_1_1_0.Op != OpAMD64CMPLconst { 23919 break 23920 } 23921 if v_1_1_0.AuxInt != 64 { 23922 break 23923 } 23924 v_1_1_0_0 := v_1_1_0.Args[0] 23925 if v_1_1_0_0.Op != OpAMD64NEGL { 23926 break 23927 } 23928 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 23929 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 23930 break 23931 } 23932 if v_1_1_0_0_0.AuxInt != -64 { 23933 break 23934 } 23935 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 23936 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 23937 break 23938 } 23939 if v_1_1_0_0_0_0.AuxInt != 63 { 23940 break 23941 } 23942 if y != v_1_1_0_0_0_0.Args[0] { 23943 break 23944 } 23945 v.reset(OpAMD64ROLQ) 23946 v.AddArg(x) 23947 v.AddArg(y) 23948 return true 23949 } 23950 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y)))) 23951 // cond: 23952 // result: (ROLQ x y) 23953 for { 23954 _ = v.Args[1] 23955 v_0 := v.Args[0] 23956 if v_0.Op != OpAMD64SHLQ { 23957 break 23958 } 23959 _ = v_0.Args[1] 23960 x := v_0.Args[0] 23961 y := v_0.Args[1] 23962 v_1 := v.Args[1] 23963 if v_1.Op != OpAMD64ANDQ { 23964 break 23965 } 23966 _ = v_1.Args[1] 23967 v_1_0 := v_1.Args[0] 23968 if v_1_0.Op != OpAMD64SBBQcarrymask { 23969 break 23970 } 23971 v_1_0_0 := v_1_0.Args[0] 23972 if v_1_0_0.Op != OpAMD64CMPLconst { 23973 break 23974 } 23975 if v_1_0_0.AuxInt != 64 { 23976 break 23977 } 23978 v_1_0_0_0 := v_1_0_0.Args[0] 23979 if v_1_0_0_0.Op != OpAMD64NEGL { 23980 break 23981 } 23982 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 23983 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 23984 break 23985 } 23986 
if v_1_0_0_0_0.AuxInt != -64 { 23987 break 23988 } 23989 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 23990 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 23991 break 23992 } 23993 if v_1_0_0_0_0_0.AuxInt != 63 { 23994 break 23995 } 23996 if y != v_1_0_0_0_0_0.Args[0] { 23997 break 23998 } 23999 v_1_1 := v_1.Args[1] 24000 if v_1_1.Op != OpAMD64SHRQ { 24001 break 24002 } 24003 _ = v_1_1.Args[1] 24004 if x != v_1_1.Args[0] { 24005 break 24006 } 24007 v_1_1_1 := v_1_1.Args[1] 24008 if v_1_1_1.Op != OpAMD64NEGL { 24009 break 24010 } 24011 if y != v_1_1_1.Args[0] { 24012 break 24013 } 24014 v.reset(OpAMD64ROLQ) 24015 v.AddArg(x) 24016 v.AddArg(y) 24017 return true 24018 } 24019 return false 24020 } 24021 func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { 24022 // match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y)) 24023 // cond: 24024 // result: (ROLQ x y) 24025 for { 24026 _ = v.Args[1] 24027 v_0 := v.Args[0] 24028 if v_0.Op != OpAMD64ANDQ { 24029 break 24030 } 24031 _ = v_0.Args[1] 24032 v_0_0 := v_0.Args[0] 24033 if v_0_0.Op != OpAMD64SHRQ { 24034 break 24035 } 24036 _ = v_0_0.Args[1] 24037 x := v_0_0.Args[0] 24038 v_0_0_1 := v_0_0.Args[1] 24039 if v_0_0_1.Op != OpAMD64NEGL { 24040 break 24041 } 24042 y := v_0_0_1.Args[0] 24043 v_0_1 := v_0.Args[1] 24044 if v_0_1.Op != OpAMD64SBBQcarrymask { 24045 break 24046 } 24047 v_0_1_0 := v_0_1.Args[0] 24048 if v_0_1_0.Op != OpAMD64CMPLconst { 24049 break 24050 } 24051 if v_0_1_0.AuxInt != 64 { 24052 break 24053 } 24054 v_0_1_0_0 := v_0_1_0.Args[0] 24055 if v_0_1_0_0.Op != OpAMD64NEGL { 24056 break 24057 } 24058 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 24059 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 24060 break 24061 } 24062 if v_0_1_0_0_0.AuxInt != -64 { 24063 break 24064 } 24065 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24066 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 24067 break 24068 } 24069 if v_0_1_0_0_0_0.AuxInt != 63 { 24070 break 24071 } 24072 if y != v_0_1_0_0_0_0.Args[0] { 24073 break 24074 } 24075 v_1 := v.Args[1] 24076 if v_1.Op != OpAMD64SHLQ { 24077 break 24078 } 24079 _ = v_1.Args[1] 24080 if x != v_1.Args[0] { 24081 break 24082 } 24083 if y != v_1.Args[1] { 24084 break 24085 } 24086 v.reset(OpAMD64ROLQ) 24087 v.AddArg(x) 24088 v.AddArg(y) 24089 return true 24090 } 24091 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y)) 24092 // cond: 24093 // result: (ROLQ x y) 24094 for { 24095 _ = v.Args[1] 24096 v_0 := v.Args[0] 24097 if v_0.Op != OpAMD64ANDQ { 24098 break 24099 } 24100 _ = v_0.Args[1] 24101 v_0_0 := v_0.Args[0] 24102 if v_0_0.Op != OpAMD64SBBQcarrymask { 24103 break 24104 } 24105 v_0_0_0 := v_0_0.Args[0] 24106 if v_0_0_0.Op != OpAMD64CMPLconst { 24107 break 24108 } 24109 if v_0_0_0.AuxInt != 64 { 24110 break 24111 } 24112 v_0_0_0_0 := v_0_0_0.Args[0] 24113 if v_0_0_0_0.Op != OpAMD64NEGL { 24114 break 24115 } 24116 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24117 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 24118 break 24119 } 24120 if v_0_0_0_0_0.AuxInt != -64 { 24121 break 24122 } 24123 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24124 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 24125 break 24126 } 24127 if v_0_0_0_0_0_0.AuxInt != 63 { 24128 break 24129 } 24130 y := v_0_0_0_0_0_0.Args[0] 24131 v_0_1 := v_0.Args[1] 24132 if v_0_1.Op != OpAMD64SHRQ { 24133 break 24134 } 24135 _ = v_0_1.Args[1] 24136 x := v_0_1.Args[0] 24137 v_0_1_1 := v_0_1.Args[1] 24138 if v_0_1_1.Op != OpAMD64NEGL { 24139 break 24140 } 24141 if y != 
func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
	// match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 64 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -64 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 64 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -64 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 64 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -64 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 63 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y))
	// cond:
	// result: (RORQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 64 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -64 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 63 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
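// rewriteValueAMD64_OpAMD64ORQ_20 and the helpers after it switch from
// rotates to load combining: adjacent narrow loads OR-ed together in
// little-endian order collapse into one wider load, but only when the
// condition shows each partial value has no other uses and mergePoint
// finds a block dominating both loads. A rough sketch of the first rule
// (byteLoadsMerged is hypothetical, not generator output):
func byteLoadsMerged(b []byte, i int) uint16 {
	lo := uint16(b[i])        // x0: (MOVBload [i0] {s} p mem)
	hi := uint16(b[i+1]) << 8 // sh: (SHLQconst [8] x1:(MOVBload [i1] ...)), i1 == i0+1
	return lo | hi            // the same bytes as a single 16-bit MOVWload at i0
}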
func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
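// ORQ_30 finishes the in-tree variants of the 16-bit merge (the shifted
// pair hiding under another ORQ, in every operand order) and then repeats
// the byte-load merge for indexed loads (MOVBloadidx1). Because the two
// address operands of an idx1 load are interchangeable, each rule appears
// once per p/idx permutation on each side, which accounts for the long run
// of near-identical matches here and in ORQ_40 and ORQ_50 below.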
func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
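// ORQ_40 carries on with the remaining p/idx permutations of the
// MOVBloadidx1 pair merge and then starts the 16-bit indexed case, where
// two MOVWloadidx1 loads two bytes apart combine into one MOVLloadidx1.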
func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
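// ORQ_50 closes out the MOVWloadidx1 permutations and begins the widest
// indexed case: two MOVLloadidx1 loads four bytes apart, one shifted left
// by 32, merge into a single MOVQloadidx1 of the full eight bytes.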
func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
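// Every variant of the merge carries the same guard: each partial load and
// shift must have exactly one use (so no other value observes the narrow
// result), mergePoint(b, x0, x1) must find a block in which the combined
// load can legally live, and clobber marks the matched values dead. A
// hypothetical, much-simplified model of that guard shape (the real Value
// and Block types live elsewhere in this package):
//
//	package main
//
//	import "fmt"
//
//	type val struct{ uses int }
//
//	// canMerge mirrors the rule conditions: a missing merge point, or any
//	// multi-use part, vetoes the rewrite.
//	func canMerge(mergePoint *int, parts ...val) bool {
//		if mergePoint == nil {
//			return false
//		}
//		for _, p := range parts {
//			if p.uses != 1 {
//				return false // value visible elsewhere; rewriting would change it
//			}
//		}
//		return true
//	}
//
//	func main() {
//		mp := 3
//		fmt.Println(canMerge(&mp, val{1}, val{1})) // true
//		fmt.Println(canMerge(&mp, val{2}, val{1})) // false
//		fmt.Println(canMerge(nil, val{1}, val{1})) // false
//	}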
func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
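// One logical byte-merge rule appears here in many near-identical copies.
// That is deliberate: the rule generator expands every commutative position
// rather than canonicalizing operands, so the outer ORQ (2 orders), the
// inner ORQ (2 orders), and the p/idx arguments of each MOVBloadidx1
// (2 orders each) multiply out to 2*2*2*2 = 16 match blocks per logical
// rule. A sketch of that enumeration (names are descriptive, not the
// generator's):
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		n := 0
//		for _, outer := range []string{"s1 or", "or s1"} {
//			for _, inner := range []string{"s0 y", "y s0"} {
//				for _, a1 := range []string{"p idx", "idx p"} {
//					for _, a0 := range []string{"p idx", "idx p"} {
//						n++
//						fmt.Printf("%2d: ORQ(%s) inner(%s) x1(%s) x0(%s)\n", n, outer, inner, a1, a0)
//					}
//				}
//			}
//		}
//		// prints 16 variants, matching the 16 generated match blocks
//	}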
func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
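// The source pattern these rules ultimately recognize is manual
// little-endian byte or halfword assembly. Assuming the partial loads stay
// adjacent and single-use (per the conditions above), code like the
// following compiles to a single 32-bit load on amd64 rather than four
// byte loads, three shifts, and three ORs:
//
//	package main
//
//	import "fmt"
//
//	func load32(b []byte) uint32 {
//		_ = b[3] // bounds-check hint
//		return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
//	}
//
//	func main() {
//		fmt.Printf("%#x\n", load32([]byte{0x78, 0x56, 0x34, 0x12})) // 0x12345678
//	}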
func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
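	// The rules from here on recognize big-endian byte order: adjacent loads
	// combined with shifts in descending address order become a wider load
	// plus a byte swap. Two MOVBloads at i0 and i0+1, with the i0 byte
	// shifted left by 8, become (ROLWconst [8] (MOVWload [i0] ...)); two such
	// 16-bit halves under a SHLQconst [16] become (BSWAPL (MOVLload ...));
	// and two byte-swapped 32-bit halves under a SHLQconst [32] become
	// (BSWAPQ (MOVQload ...)).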
	// match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
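	// Next come the shifted forms of the byte-swapped patterns: the merged
	// value need not start at bit 0, so the conditions require j1 == j0-8
	// and j1 % 16 == 0, and the result keeps an outer SHLQconst [j1] around
	// the byte-swapped wider load.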
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
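// rewriteValueAMD64_OpAMD64ORQ_100 finishes the shifted byte-swap rules (the
// word-sized variants require j1 == j0-16 and j1 % 32 == 0) and then begins
// the indexed-addressing versions of the byte-swap combines.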
func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
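	// The remaining rules repeat the ROLWconst byte-swap combine for indexed
	// byte loads (MOVBloadidx1). Every ordering of the ORQ operands and of
	// the interchangeable p/idx arguments is matched separately; all of them
	// normalize to the same (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))
	// result.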
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
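// rewriteValueAMD64_OpAMD64ORQ_110 continues the indexed byte-swap combines:
// the rest of the MOVBloadidx1 permutations, followed by the word-sized
// (ROLWconst/MOVWloadidx1) pairs that merge into a byte-swapped MOVLloadidx1.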
func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
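	// Word-sized indexed pairs: two byte-reversed 16-bit loads at i0 and
	// i0+2, with the i0 half under a SHLQconst [16], merge into a single
	// byte-reversed 32-bit indexed load.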
[i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 30503 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30504 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 30505 for { 30506 _ = v.Args[1] 30507 r1 := v.Args[0] 30508 if r1.Op != OpAMD64ROLWconst { 30509 break 30510 } 30511 if r1.AuxInt != 8 { 30512 break 30513 } 30514 x1 := r1.Args[0] 30515 if x1.Op != OpAMD64MOVWloadidx1 { 30516 break 30517 } 30518 i1 := x1.AuxInt 30519 s := x1.Aux 30520 _ = x1.Args[2] 30521 p := x1.Args[0] 30522 idx := x1.Args[1] 30523 mem := x1.Args[2] 30524 sh := v.Args[1] 30525 if sh.Op != OpAMD64SHLQconst { 30526 break 30527 } 30528 if sh.AuxInt != 16 { 30529 break 30530 } 30531 r0 := sh.Args[0] 30532 if r0.Op != OpAMD64ROLWconst { 30533 break 30534 } 30535 if r0.AuxInt != 8 { 30536 break 30537 } 30538 x0 := r0.Args[0] 30539 if x0.Op != OpAMD64MOVWloadidx1 { 30540 break 30541 } 30542 i0 := x0.AuxInt 30543 if x0.Aux != s { 30544 break 30545 } 30546 _ = x0.Args[2] 30547 if p != x0.Args[0] { 30548 break 30549 } 30550 if idx != x0.Args[1] { 30551 break 30552 } 30553 if mem != x0.Args[2] { 30554 break 30555 } 30556 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30557 break 30558 } 30559 b = mergePoint(b, x0, x1) 30560 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30561 v.reset(OpCopy) 30562 v.AddArg(v0) 30563 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30564 v1.AuxInt = i0 30565 v1.Aux = s 30566 v1.AddArg(p) 30567 v1.AddArg(idx) 30568 v1.AddArg(mem) 30569 v0.AddArg(v1) 30570 return true 30571 } 30572 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 30573 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30574 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 30575 for { 30576 _ = v.Args[1] 30577 r1 := v.Args[0] 30578 if r1.Op != OpAMD64ROLWconst { 30579 break 30580 } 30581 if r1.AuxInt != 8 { 30582 break 30583 } 30584 x1 := r1.Args[0] 30585 if x1.Op != OpAMD64MOVWloadidx1 { 30586 break 30587 } 30588 i1 := x1.AuxInt 30589 s := x1.Aux 30590 _ = x1.Args[2] 30591 idx := x1.Args[0] 30592 p := x1.Args[1] 30593 mem := x1.Args[2] 30594 sh := v.Args[1] 30595 if sh.Op != OpAMD64SHLQconst { 30596 break 30597 } 30598 if sh.AuxInt != 16 { 30599 break 30600 } 30601 r0 := sh.Args[0] 30602 if r0.Op != OpAMD64ROLWconst { 30603 break 30604 } 30605 if r0.AuxInt != 8 { 30606 break 30607 } 30608 x0 := r0.Args[0] 30609 if x0.Op != OpAMD64MOVWloadidx1 { 30610 break 30611 } 30612 i0 := x0.AuxInt 30613 if x0.Aux != s { 30614 break 30615 } 30616 _ = x0.Args[2] 30617 if p != x0.Args[0] { 30618 break 30619 } 30620 if idx != x0.Args[1] { 30621 break 30622 } 30623 if mem != x0.Args[2] { 30624 break 30625 } 30626 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30627 break 30628 } 30629 b = mergePoint(b, 
x0, x1) 30630 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30631 v.reset(OpCopy) 30632 v.AddArg(v0) 30633 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30634 v1.AuxInt = i0 30635 v1.Aux = s 30636 v1.AddArg(p) 30637 v1.AddArg(idx) 30638 v1.AddArg(mem) 30639 v0.AddArg(v1) 30640 return true 30641 } 30642 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 30643 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30644 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 30645 for { 30646 _ = v.Args[1] 30647 r1 := v.Args[0] 30648 if r1.Op != OpAMD64ROLWconst { 30649 break 30650 } 30651 if r1.AuxInt != 8 { 30652 break 30653 } 30654 x1 := r1.Args[0] 30655 if x1.Op != OpAMD64MOVWloadidx1 { 30656 break 30657 } 30658 i1 := x1.AuxInt 30659 s := x1.Aux 30660 _ = x1.Args[2] 30661 p := x1.Args[0] 30662 idx := x1.Args[1] 30663 mem := x1.Args[2] 30664 sh := v.Args[1] 30665 if sh.Op != OpAMD64SHLQconst { 30666 break 30667 } 30668 if sh.AuxInt != 16 { 30669 break 30670 } 30671 r0 := sh.Args[0] 30672 if r0.Op != OpAMD64ROLWconst { 30673 break 30674 } 30675 if r0.AuxInt != 8 { 30676 break 30677 } 30678 x0 := r0.Args[0] 30679 if x0.Op != OpAMD64MOVWloadidx1 { 30680 break 30681 } 30682 i0 := x0.AuxInt 30683 if x0.Aux != s { 30684 break 30685 } 30686 _ = x0.Args[2] 30687 if idx != x0.Args[0] { 30688 break 30689 } 30690 if p != x0.Args[1] { 30691 break 30692 } 30693 if mem != x0.Args[2] { 30694 break 30695 } 30696 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30697 break 30698 } 30699 b = mergePoint(b, x0, x1) 30700 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30701 v.reset(OpCopy) 30702 v.AddArg(v0) 30703 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30704 v1.AuxInt = i0 30705 v1.Aux = s 30706 v1.AddArg(p) 30707 v1.AddArg(idx) 30708 v1.AddArg(mem) 30709 v0.AddArg(v1) 30710 return true 30711 } 30712 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 30713 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30714 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 30715 for { 30716 _ = v.Args[1] 30717 r1 := v.Args[0] 30718 if r1.Op != OpAMD64ROLWconst { 30719 break 30720 } 30721 if r1.AuxInt != 8 { 30722 break 30723 } 30724 x1 := r1.Args[0] 30725 if x1.Op != OpAMD64MOVWloadidx1 { 30726 break 30727 } 30728 i1 := x1.AuxInt 30729 s := x1.Aux 30730 _ = x1.Args[2] 30731 idx := x1.Args[0] 30732 p := x1.Args[1] 30733 mem := x1.Args[2] 30734 sh := v.Args[1] 30735 if sh.Op != OpAMD64SHLQconst { 30736 break 30737 } 30738 if sh.AuxInt != 16 { 30739 break 30740 } 30741 r0 := sh.Args[0] 30742 if r0.Op != OpAMD64ROLWconst { 30743 break 30744 } 30745 if r0.AuxInt != 8 { 30746 break 30747 } 30748 x0 := r0.Args[0] 30749 if x0.Op != OpAMD64MOVWloadidx1 { 30750 break 30751 } 30752 i0 := x0.AuxInt 30753 if x0.Aux != s { 30754 break 30755 } 30756 _ = x0.Args[2] 30757 if idx != x0.Args[0] { 30758 
break 30759 } 30760 if p != x0.Args[1] { 30761 break 30762 } 30763 if mem != x0.Args[2] { 30764 break 30765 } 30766 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30767 break 30768 } 30769 b = mergePoint(b, x0, x1) 30770 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30771 v.reset(OpCopy) 30772 v.AddArg(v0) 30773 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30774 v1.AuxInt = i0 30775 v1.Aux = s 30776 v1.AddArg(p) 30777 v1.AddArg(idx) 30778 v1.AddArg(mem) 30779 v0.AddArg(v1) 30780 return true 30781 } 30782 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 30783 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30784 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 30785 for { 30786 _ = v.Args[1] 30787 sh := v.Args[0] 30788 if sh.Op != OpAMD64SHLQconst { 30789 break 30790 } 30791 if sh.AuxInt != 16 { 30792 break 30793 } 30794 r0 := sh.Args[0] 30795 if r0.Op != OpAMD64ROLWconst { 30796 break 30797 } 30798 if r0.AuxInt != 8 { 30799 break 30800 } 30801 x0 := r0.Args[0] 30802 if x0.Op != OpAMD64MOVWloadidx1 { 30803 break 30804 } 30805 i0 := x0.AuxInt 30806 s := x0.Aux 30807 _ = x0.Args[2] 30808 p := x0.Args[0] 30809 idx := x0.Args[1] 30810 mem := x0.Args[2] 30811 r1 := v.Args[1] 30812 if r1.Op != OpAMD64ROLWconst { 30813 break 30814 } 30815 if r1.AuxInt != 8 { 30816 break 30817 } 30818 x1 := r1.Args[0] 30819 if x1.Op != OpAMD64MOVWloadidx1 { 30820 break 30821 } 30822 i1 := x1.AuxInt 30823 if x1.Aux != s { 30824 break 30825 } 30826 _ = x1.Args[2] 30827 if p != x1.Args[0] { 30828 break 30829 } 30830 if idx != x1.Args[1] { 30831 break 30832 } 30833 if mem != x1.Args[2] { 30834 break 30835 } 30836 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30837 break 30838 } 30839 b = mergePoint(b, x0, x1) 30840 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30841 v.reset(OpCopy) 30842 v.AddArg(v0) 30843 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30844 v1.AuxInt = i0 30845 v1.Aux = s 30846 v1.AddArg(p) 30847 v1.AddArg(idx) 30848 v1.AddArg(mem) 30849 v0.AddArg(v1) 30850 return true 30851 } 30852 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 30853 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30854 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 30855 for { 30856 _ = v.Args[1] 30857 sh := v.Args[0] 30858 if sh.Op != OpAMD64SHLQconst { 30859 break 30860 } 30861 if sh.AuxInt != 16 { 30862 break 30863 } 30864 r0 := sh.Args[0] 30865 if r0.Op != OpAMD64ROLWconst { 30866 break 30867 } 30868 if r0.AuxInt != 8 { 30869 break 30870 } 30871 x0 := r0.Args[0] 30872 if x0.Op != OpAMD64MOVWloadidx1 { 30873 break 30874 } 30875 i0 := x0.AuxInt 30876 s := x0.Aux 30877 _ = x0.Args[2] 30878 idx := x0.Args[0] 30879 p 
:= x0.Args[1] 30880 mem := x0.Args[2] 30881 r1 := v.Args[1] 30882 if r1.Op != OpAMD64ROLWconst { 30883 break 30884 } 30885 if r1.AuxInt != 8 { 30886 break 30887 } 30888 x1 := r1.Args[0] 30889 if x1.Op != OpAMD64MOVWloadidx1 { 30890 break 30891 } 30892 i1 := x1.AuxInt 30893 if x1.Aux != s { 30894 break 30895 } 30896 _ = x1.Args[2] 30897 if p != x1.Args[0] { 30898 break 30899 } 30900 if idx != x1.Args[1] { 30901 break 30902 } 30903 if mem != x1.Args[2] { 30904 break 30905 } 30906 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30907 break 30908 } 30909 b = mergePoint(b, x0, x1) 30910 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30911 v.reset(OpCopy) 30912 v.AddArg(v0) 30913 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30914 v1.AuxInt = i0 30915 v1.Aux = s 30916 v1.AddArg(p) 30917 v1.AddArg(idx) 30918 v1.AddArg(mem) 30919 v0.AddArg(v1) 30920 return true 30921 } 30922 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 30923 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 30924 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 30925 for { 30926 _ = v.Args[1] 30927 sh := v.Args[0] 30928 if sh.Op != OpAMD64SHLQconst { 30929 break 30930 } 30931 if sh.AuxInt != 16 { 30932 break 30933 } 30934 r0 := sh.Args[0] 30935 if r0.Op != OpAMD64ROLWconst { 30936 break 30937 } 30938 if r0.AuxInt != 8 { 30939 break 30940 } 30941 x0 := r0.Args[0] 30942 if x0.Op != OpAMD64MOVWloadidx1 { 30943 break 30944 } 30945 i0 := x0.AuxInt 30946 s := x0.Aux 30947 _ = x0.Args[2] 30948 p := x0.Args[0] 30949 idx := x0.Args[1] 30950 mem := x0.Args[2] 30951 r1 := v.Args[1] 30952 if r1.Op != OpAMD64ROLWconst { 30953 break 30954 } 30955 if r1.AuxInt != 8 { 30956 break 30957 } 30958 x1 := r1.Args[0] 30959 if x1.Op != OpAMD64MOVWloadidx1 { 30960 break 30961 } 30962 i1 := x1.AuxInt 30963 if x1.Aux != s { 30964 break 30965 } 30966 _ = x1.Args[2] 30967 if idx != x1.Args[0] { 30968 break 30969 } 30970 if p != x1.Args[1] { 30971 break 30972 } 30973 if mem != x1.Args[2] { 30974 break 30975 } 30976 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 30977 break 30978 } 30979 b = mergePoint(b, x0, x1) 30980 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 30981 v.reset(OpCopy) 30982 v.AddArg(v0) 30983 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 30984 v1.AuxInt = i0 30985 v1.Aux = s 30986 v1.AddArg(p) 30987 v1.AddArg(idx) 30988 v1.AddArg(mem) 30989 v0.AddArg(v1) 30990 return true 30991 } 30992 return false 30993 } 30994 func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool { 30995 b := v.Block 30996 _ = b 30997 typ := &b.Func.Config.Types 30998 _ = typ 30999 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 31000 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 31001 // result: 
@mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 31002 for { 31003 _ = v.Args[1] 31004 sh := v.Args[0] 31005 if sh.Op != OpAMD64SHLQconst { 31006 break 31007 } 31008 if sh.AuxInt != 16 { 31009 break 31010 } 31011 r0 := sh.Args[0] 31012 if r0.Op != OpAMD64ROLWconst { 31013 break 31014 } 31015 if r0.AuxInt != 8 { 31016 break 31017 } 31018 x0 := r0.Args[0] 31019 if x0.Op != OpAMD64MOVWloadidx1 { 31020 break 31021 } 31022 i0 := x0.AuxInt 31023 s := x0.Aux 31024 _ = x0.Args[2] 31025 idx := x0.Args[0] 31026 p := x0.Args[1] 31027 mem := x0.Args[2] 31028 r1 := v.Args[1] 31029 if r1.Op != OpAMD64ROLWconst { 31030 break 31031 } 31032 if r1.AuxInt != 8 { 31033 break 31034 } 31035 x1 := r1.Args[0] 31036 if x1.Op != OpAMD64MOVWloadidx1 { 31037 break 31038 } 31039 i1 := x1.AuxInt 31040 if x1.Aux != s { 31041 break 31042 } 31043 _ = x1.Args[2] 31044 if idx != x1.Args[0] { 31045 break 31046 } 31047 if p != x1.Args[1] { 31048 break 31049 } 31050 if mem != x1.Args[2] { 31051 break 31052 } 31053 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 31054 break 31055 } 31056 b = mergePoint(b, x0, x1) 31057 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 31058 v.reset(OpCopy) 31059 v.AddArg(v0) 31060 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 31061 v1.AuxInt = i0 31062 v1.Aux = s 31063 v1.AddArg(p) 31064 v1.AddArg(idx) 31065 v1.AddArg(mem) 31066 v0.AddArg(v1) 31067 return true 31068 } 31069 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) 31070 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 31071 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 31072 for { 31073 _ = v.Args[1] 31074 r1 := v.Args[0] 31075 if r1.Op != OpAMD64BSWAPL { 31076 break 31077 } 31078 x1 := r1.Args[0] 31079 if x1.Op != OpAMD64MOVLloadidx1 { 31080 break 31081 } 31082 i1 := x1.AuxInt 31083 s := x1.Aux 31084 _ = x1.Args[2] 31085 p := x1.Args[0] 31086 idx := x1.Args[1] 31087 mem := x1.Args[2] 31088 sh := v.Args[1] 31089 if sh.Op != OpAMD64SHLQconst { 31090 break 31091 } 31092 if sh.AuxInt != 32 { 31093 break 31094 } 31095 r0 := sh.Args[0] 31096 if r0.Op != OpAMD64BSWAPL { 31097 break 31098 } 31099 x0 := r0.Args[0] 31100 if x0.Op != OpAMD64MOVLloadidx1 { 31101 break 31102 } 31103 i0 := x0.AuxInt 31104 if x0.Aux != s { 31105 break 31106 } 31107 _ = x0.Args[2] 31108 if p != x0.Args[0] { 31109 break 31110 } 31111 if idx != x0.Args[1] { 31112 break 31113 } 31114 if mem != x0.Args[2] { 31115 break 31116 } 31117 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 31118 break 31119 } 31120 b = mergePoint(b, x0, x1) 31121 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 31122 v.reset(OpCopy) 31123 v.AddArg(v0) 31124 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 31125 v1.AuxInt = i0 31126 v1.Aux = s 31127 v1.AddArg(p) 31128 v1.AddArg(idx) 31129 v1.AddArg(mem) 31130 v0.AddArg(v1) 31131 return true 31132 } 31133 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p 
idx mem)))) 31134 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 31135 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 31136 for { 31137 _ = v.Args[1] 31138 r1 := v.Args[0] 31139 if r1.Op != OpAMD64BSWAPL { 31140 break 31141 } 31142 x1 := r1.Args[0] 31143 if x1.Op != OpAMD64MOVLloadidx1 { 31144 break 31145 } 31146 i1 := x1.AuxInt 31147 s := x1.Aux 31148 _ = x1.Args[2] 31149 idx := x1.Args[0] 31150 p := x1.Args[1] 31151 mem := x1.Args[2] 31152 sh := v.Args[1] 31153 if sh.Op != OpAMD64SHLQconst { 31154 break 31155 } 31156 if sh.AuxInt != 32 { 31157 break 31158 } 31159 r0 := sh.Args[0] 31160 if r0.Op != OpAMD64BSWAPL { 31161 break 31162 } 31163 x0 := r0.Args[0] 31164 if x0.Op != OpAMD64MOVLloadidx1 { 31165 break 31166 } 31167 i0 := x0.AuxInt 31168 if x0.Aux != s { 31169 break 31170 } 31171 _ = x0.Args[2] 31172 if p != x0.Args[0] { 31173 break 31174 } 31175 if idx != x0.Args[1] { 31176 break 31177 } 31178 if mem != x0.Args[2] { 31179 break 31180 } 31181 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 31182 break 31183 } 31184 b = mergePoint(b, x0, x1) 31185 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 31186 v.reset(OpCopy) 31187 v.AddArg(v0) 31188 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 31189 v1.AuxInt = i0 31190 v1.Aux = s 31191 v1.AddArg(p) 31192 v1.AddArg(idx) 31193 v1.AddArg(mem) 31194 v0.AddArg(v1) 31195 return true 31196 } 31197 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 31198 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 31199 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 31200 for { 31201 _ = v.Args[1] 31202 r1 := v.Args[0] 31203 if r1.Op != OpAMD64BSWAPL { 31204 break 31205 } 31206 x1 := r1.Args[0] 31207 if x1.Op != OpAMD64MOVLloadidx1 { 31208 break 31209 } 31210 i1 := x1.AuxInt 31211 s := x1.Aux 31212 _ = x1.Args[2] 31213 p := x1.Args[0] 31214 idx := x1.Args[1] 31215 mem := x1.Args[2] 31216 sh := v.Args[1] 31217 if sh.Op != OpAMD64SHLQconst { 31218 break 31219 } 31220 if sh.AuxInt != 32 { 31221 break 31222 } 31223 r0 := sh.Args[0] 31224 if r0.Op != OpAMD64BSWAPL { 31225 break 31226 } 31227 x0 := r0.Args[0] 31228 if x0.Op != OpAMD64MOVLloadidx1 { 31229 break 31230 } 31231 i0 := x0.AuxInt 31232 if x0.Aux != s { 31233 break 31234 } 31235 _ = x0.Args[2] 31236 if idx != x0.Args[0] { 31237 break 31238 } 31239 if p != x0.Args[1] { 31240 break 31241 } 31242 if mem != x0.Args[2] { 31243 break 31244 } 31245 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 31246 break 31247 } 31248 b = mergePoint(b, x0, x1) 31249 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 31250 v.reset(OpCopy) 31251 v.AddArg(v0) 31252 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 31253 v1.AuxInt = i0 31254 v1.Aux = s 31255 v1.AddArg(p) 31256 v1.AddArg(idx) 31257 v1.AddArg(mem) 31258 v0.AddArg(v1) 31259 return 
true 31260 } 31261 // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) 31262 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 31263 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 31264 for { 31265 _ = v.Args[1] 31266 r1 := v.Args[0] 31267 if r1.Op != OpAMD64BSWAPL { 31268 break 31269 } 31270 x1 := r1.Args[0] 31271 if x1.Op != OpAMD64MOVLloadidx1 { 31272 break 31273 } 31274 i1 := x1.AuxInt 31275 s := x1.Aux 31276 _ = x1.Args[2] 31277 idx := x1.Args[0] 31278 p := x1.Args[1] 31279 mem := x1.Args[2] 31280 sh := v.Args[1] 31281 if sh.Op != OpAMD64SHLQconst { 31282 break 31283 } 31284 if sh.AuxInt != 32 { 31285 break 31286 } 31287 r0 := sh.Args[0] 31288 if r0.Op != OpAMD64BSWAPL { 31289 break 31290 } 31291 x0 := r0.Args[0] 31292 if x0.Op != OpAMD64MOVLloadidx1 { 31293 break 31294 } 31295 i0 := x0.AuxInt 31296 if x0.Aux != s { 31297 break 31298 } 31299 _ = x0.Args[2] 31300 if idx != x0.Args[0] { 31301 break 31302 } 31303 if p != x0.Args[1] { 31304 break 31305 } 31306 if mem != x0.Args[2] { 31307 break 31308 } 31309 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 31310 break 31311 } 31312 b = mergePoint(b, x0, x1) 31313 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 31314 v.reset(OpCopy) 31315 v.AddArg(v0) 31316 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 31317 v1.AuxInt = i0 31318 v1.Aux = s 31319 v1.AddArg(p) 31320 v1.AddArg(idx) 31321 v1.AddArg(mem) 31322 v0.AddArg(v1) 31323 return true 31324 } 31325 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 31326 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 31327 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 31328 for { 31329 _ = v.Args[1] 31330 sh := v.Args[0] 31331 if sh.Op != OpAMD64SHLQconst { 31332 break 31333 } 31334 if sh.AuxInt != 32 { 31335 break 31336 } 31337 r0 := sh.Args[0] 31338 if r0.Op != OpAMD64BSWAPL { 31339 break 31340 } 31341 x0 := r0.Args[0] 31342 if x0.Op != OpAMD64MOVLloadidx1 { 31343 break 31344 } 31345 i0 := x0.AuxInt 31346 s := x0.Aux 31347 _ = x0.Args[2] 31348 p := x0.Args[0] 31349 idx := x0.Args[1] 31350 mem := x0.Args[2] 31351 r1 := v.Args[1] 31352 if r1.Op != OpAMD64BSWAPL { 31353 break 31354 } 31355 x1 := r1.Args[0] 31356 if x1.Op != OpAMD64MOVLloadidx1 { 31357 break 31358 } 31359 i1 := x1.AuxInt 31360 if x1.Aux != s { 31361 break 31362 } 31363 _ = x1.Args[2] 31364 if p != x1.Args[0] { 31365 break 31366 } 31367 if idx != x1.Args[1] { 31368 break 31369 } 31370 if mem != x1.Args[2] { 31371 break 31372 } 31373 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 31374 break 31375 } 31376 b = mergePoint(b, x0, x1) 31377 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 31378 v.reset(OpCopy) 31379 v.AddArg(v0) 31380 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, 
typ.UInt64) 31381 v1.AuxInt = i0 31382 v1.Aux = s 31383 v1.AddArg(p) 31384 v1.AddArg(idx) 31385 v1.AddArg(mem) 31386 v0.AddArg(v1) 31387 return true 31388 } 31389 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) 31390 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 31391 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 31392 for { 31393 _ = v.Args[1] 31394 sh := v.Args[0] 31395 if sh.Op != OpAMD64SHLQconst { 31396 break 31397 } 31398 if sh.AuxInt != 32 { 31399 break 31400 } 31401 r0 := sh.Args[0] 31402 if r0.Op != OpAMD64BSWAPL { 31403 break 31404 } 31405 x0 := r0.Args[0] 31406 if x0.Op != OpAMD64MOVLloadidx1 { 31407 break 31408 } 31409 i0 := x0.AuxInt 31410 s := x0.Aux 31411 _ = x0.Args[2] 31412 idx := x0.Args[0] 31413 p := x0.Args[1] 31414 mem := x0.Args[2] 31415 r1 := v.Args[1] 31416 if r1.Op != OpAMD64BSWAPL { 31417 break 31418 } 31419 x1 := r1.Args[0] 31420 if x1.Op != OpAMD64MOVLloadidx1 { 31421 break 31422 } 31423 i1 := x1.AuxInt 31424 if x1.Aux != s { 31425 break 31426 } 31427 _ = x1.Args[2] 31428 if p != x1.Args[0] { 31429 break 31430 } 31431 if idx != x1.Args[1] { 31432 break 31433 } 31434 if mem != x1.Args[2] { 31435 break 31436 } 31437 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 31438 break 31439 } 31440 b = mergePoint(b, x0, x1) 31441 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 31442 v.reset(OpCopy) 31443 v.AddArg(v0) 31444 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 31445 v1.AuxInt = i0 31446 v1.Aux = s 31447 v1.AddArg(p) 31448 v1.AddArg(idx) 31449 v1.AddArg(mem) 31450 v0.AddArg(v1) 31451 return true 31452 } 31453 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) 31454 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 31455 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 31456 for { 31457 _ = v.Args[1] 31458 sh := v.Args[0] 31459 if sh.Op != OpAMD64SHLQconst { 31460 break 31461 } 31462 if sh.AuxInt != 32 { 31463 break 31464 } 31465 r0 := sh.Args[0] 31466 if r0.Op != OpAMD64BSWAPL { 31467 break 31468 } 31469 x0 := r0.Args[0] 31470 if x0.Op != OpAMD64MOVLloadidx1 { 31471 break 31472 } 31473 i0 := x0.AuxInt 31474 s := x0.Aux 31475 _ = x0.Args[2] 31476 p := x0.Args[0] 31477 idx := x0.Args[1] 31478 mem := x0.Args[2] 31479 r1 := v.Args[1] 31480 if r1.Op != OpAMD64BSWAPL { 31481 break 31482 } 31483 x1 := r1.Args[0] 31484 if x1.Op != OpAMD64MOVLloadidx1 { 31485 break 31486 } 31487 i1 := x1.AuxInt 31488 if x1.Aux != s { 31489 break 31490 } 31491 _ = x1.Args[2] 31492 if idx != x1.Args[0] { 31493 break 31494 } 31495 if p != x1.Args[1] { 31496 break 31497 } 31498 if mem != x1.Args[2] { 31499 break 31500 } 31501 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 31502 break 31503 } 31504 b = mergePoint(b, x0, x1) 31505 
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 31506 v.reset(OpCopy) 31507 v.AddArg(v0) 31508 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 31509 v1.AuxInt = i0 31510 v1.Aux = s 31511 v1.AddArg(p) 31512 v1.AddArg(idx) 31513 v1.AddArg(mem) 31514 v0.AddArg(v1) 31515 return true 31516 } 31517 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) 31518 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 31519 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem)) 31520 for { 31521 _ = v.Args[1] 31522 sh := v.Args[0] 31523 if sh.Op != OpAMD64SHLQconst { 31524 break 31525 } 31526 if sh.AuxInt != 32 { 31527 break 31528 } 31529 r0 := sh.Args[0] 31530 if r0.Op != OpAMD64BSWAPL { 31531 break 31532 } 31533 x0 := r0.Args[0] 31534 if x0.Op != OpAMD64MOVLloadidx1 { 31535 break 31536 } 31537 i0 := x0.AuxInt 31538 s := x0.Aux 31539 _ = x0.Args[2] 31540 idx := x0.Args[0] 31541 p := x0.Args[1] 31542 mem := x0.Args[2] 31543 r1 := v.Args[1] 31544 if r1.Op != OpAMD64BSWAPL { 31545 break 31546 } 31547 x1 := r1.Args[0] 31548 if x1.Op != OpAMD64MOVLloadidx1 { 31549 break 31550 } 31551 i1 := x1.AuxInt 31552 if x1.Aux != s { 31553 break 31554 } 31555 _ = x1.Args[2] 31556 if idx != x1.Args[0] { 31557 break 31558 } 31559 if p != x1.Args[1] { 31560 break 31561 } 31562 if mem != x1.Args[2] { 31563 break 31564 } 31565 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 31566 break 31567 } 31568 b = mergePoint(b, x0, x1) 31569 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 31570 v.reset(OpCopy) 31571 v.AddArg(v0) 31572 v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64) 31573 v1.AuxInt = i0 31574 v1.Aux = s 31575 v1.AddArg(p) 31576 v1.AddArg(idx) 31577 v1.AddArg(mem) 31578 v0.AddArg(v1) 31579 return true 31580 } 31581 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 31582 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31583 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 31584 for { 31585 _ = v.Args[1] 31586 s0 := v.Args[0] 31587 if s0.Op != OpAMD64SHLQconst { 31588 break 31589 } 31590 j0 := s0.AuxInt 31591 x0 := s0.Args[0] 31592 if x0.Op != OpAMD64MOVBloadidx1 { 31593 break 31594 } 31595 i0 := x0.AuxInt 31596 s := x0.Aux 31597 _ = x0.Args[2] 31598 p := x0.Args[0] 31599 idx := x0.Args[1] 31600 mem := x0.Args[2] 31601 or := v.Args[1] 31602 if or.Op != OpAMD64ORQ { 31603 break 31604 } 31605 _ = or.Args[1] 31606 s1 := or.Args[0] 31607 if s1.Op != OpAMD64SHLQconst { 31608 break 31609 } 31610 j1 := s1.AuxInt 31611 x1 := s1.Args[0] 31612 if x1.Op != OpAMD64MOVBloadidx1 { 31613 break 31614 } 31615 i1 := x1.AuxInt 31616 if x1.Aux != s { 31617 break 31618 } 31619 _ = x1.Args[2] 31620 if p != x1.Args[0] { 31621 break 31622 } 31623 if idx != x1.Args[1] { 31624 break 31625 } 31626 if mem != x1.Args[2] { 31627 break 31628 } 31629 y := 
or.Args[1] 31630 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31631 break 31632 } 31633 b = mergePoint(b, x0, x1) 31634 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31635 v.reset(OpCopy) 31636 v.AddArg(v0) 31637 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31638 v1.AuxInt = j1 31639 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 31640 v2.AuxInt = 8 31641 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31642 v3.AuxInt = i0 31643 v3.Aux = s 31644 v3.AddArg(p) 31645 v3.AddArg(idx) 31646 v3.AddArg(mem) 31647 v2.AddArg(v3) 31648 v1.AddArg(v2) 31649 v0.AddArg(v1) 31650 v0.AddArg(y) 31651 return true 31652 } 31653 return false 31654 } 31655 func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool { 31656 b := v.Block 31657 _ = b 31658 typ := &b.Func.Config.Types 31659 _ = typ 31660 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 31661 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31662 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 31663 for { 31664 _ = v.Args[1] 31665 s0 := v.Args[0] 31666 if s0.Op != OpAMD64SHLQconst { 31667 break 31668 } 31669 j0 := s0.AuxInt 31670 x0 := s0.Args[0] 31671 if x0.Op != OpAMD64MOVBloadidx1 { 31672 break 31673 } 31674 i0 := x0.AuxInt 31675 s := x0.Aux 31676 _ = x0.Args[2] 31677 idx := x0.Args[0] 31678 p := x0.Args[1] 31679 mem := x0.Args[2] 31680 or := v.Args[1] 31681 if or.Op != OpAMD64ORQ { 31682 break 31683 } 31684 _ = or.Args[1] 31685 s1 := or.Args[0] 31686 if s1.Op != OpAMD64SHLQconst { 31687 break 31688 } 31689 j1 := s1.AuxInt 31690 x1 := s1.Args[0] 31691 if x1.Op != OpAMD64MOVBloadidx1 { 31692 break 31693 } 31694 i1 := x1.AuxInt 31695 if x1.Aux != s { 31696 break 31697 } 31698 _ = x1.Args[2] 31699 if p != x1.Args[0] { 31700 break 31701 } 31702 if idx != x1.Args[1] { 31703 break 31704 } 31705 if mem != x1.Args[2] { 31706 break 31707 } 31708 y := or.Args[1] 31709 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31710 break 31711 } 31712 b = mergePoint(b, x0, x1) 31713 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31714 v.reset(OpCopy) 31715 v.AddArg(v0) 31716 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31717 v1.AuxInt = j1 31718 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 31719 v2.AuxInt = 8 31720 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31721 v3.AuxInt = i0 31722 v3.Aux = s 31723 v3.AddArg(p) 31724 v3.AddArg(idx) 31725 v3.AddArg(mem) 31726 v2.AddArg(v3) 31727 v1.AddArg(v2) 31728 v0.AddArg(v1) 31729 v0.AddArg(y) 31730 return true 31731 } 31732 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 31733 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) 
&& clobber(s0) && clobber(s1) && clobber(or) 31734 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 31735 for { 31736 _ = v.Args[1] 31737 s0 := v.Args[0] 31738 if s0.Op != OpAMD64SHLQconst { 31739 break 31740 } 31741 j0 := s0.AuxInt 31742 x0 := s0.Args[0] 31743 if x0.Op != OpAMD64MOVBloadidx1 { 31744 break 31745 } 31746 i0 := x0.AuxInt 31747 s := x0.Aux 31748 _ = x0.Args[2] 31749 p := x0.Args[0] 31750 idx := x0.Args[1] 31751 mem := x0.Args[2] 31752 or := v.Args[1] 31753 if or.Op != OpAMD64ORQ { 31754 break 31755 } 31756 _ = or.Args[1] 31757 s1 := or.Args[0] 31758 if s1.Op != OpAMD64SHLQconst { 31759 break 31760 } 31761 j1 := s1.AuxInt 31762 x1 := s1.Args[0] 31763 if x1.Op != OpAMD64MOVBloadidx1 { 31764 break 31765 } 31766 i1 := x1.AuxInt 31767 if x1.Aux != s { 31768 break 31769 } 31770 _ = x1.Args[2] 31771 if idx != x1.Args[0] { 31772 break 31773 } 31774 if p != x1.Args[1] { 31775 break 31776 } 31777 if mem != x1.Args[2] { 31778 break 31779 } 31780 y := or.Args[1] 31781 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31782 break 31783 } 31784 b = mergePoint(b, x0, x1) 31785 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31786 v.reset(OpCopy) 31787 v.AddArg(v0) 31788 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31789 v1.AuxInt = j1 31790 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 31791 v2.AuxInt = 8 31792 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31793 v3.AuxInt = i0 31794 v3.Aux = s 31795 v3.AddArg(p) 31796 v3.AddArg(idx) 31797 v3.AddArg(mem) 31798 v2.AddArg(v3) 31799 v1.AddArg(v2) 31800 v0.AddArg(v1) 31801 v0.AddArg(y) 31802 return true 31803 } 31804 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 31805 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31806 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 31807 for { 31808 _ = v.Args[1] 31809 s0 := v.Args[0] 31810 if s0.Op != OpAMD64SHLQconst { 31811 break 31812 } 31813 j0 := s0.AuxInt 31814 x0 := s0.Args[0] 31815 if x0.Op != OpAMD64MOVBloadidx1 { 31816 break 31817 } 31818 i0 := x0.AuxInt 31819 s := x0.Aux 31820 _ = x0.Args[2] 31821 idx := x0.Args[0] 31822 p := x0.Args[1] 31823 mem := x0.Args[2] 31824 or := v.Args[1] 31825 if or.Op != OpAMD64ORQ { 31826 break 31827 } 31828 _ = or.Args[1] 31829 s1 := or.Args[0] 31830 if s1.Op != OpAMD64SHLQconst { 31831 break 31832 } 31833 j1 := s1.AuxInt 31834 x1 := s1.Args[0] 31835 if x1.Op != OpAMD64MOVBloadidx1 { 31836 break 31837 } 31838 i1 := x1.AuxInt 31839 if x1.Aux != s { 31840 break 31841 } 31842 _ = x1.Args[2] 31843 if idx != x1.Args[0] { 31844 break 31845 } 31846 if p != x1.Args[1] { 31847 break 31848 } 31849 if mem != x1.Args[2] { 31850 break 31851 } 31852 y := or.Args[1] 31853 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31854 break 31855 } 
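	// Editorial note, not part of the generated output: the guard above has
	// established adjacency (i1 == i0+1), lane alignment (j1 == j0-8 with
	// j1%16 == 0), and single use of every matched value, so the two byte
	// loads may be fused. At source level this rule family recognizes
	// byte-by-byte big-endian assembly of a 16-bit chunk, roughly
	// (with hypothetical b and i):
	//
	//	uint64(b[i])<<8 | uint64(b[i+1])
	//
	// shifted into a common lane. The code below rebuilds the expression at
	// mergePoint(b, x0, x1) as
	// (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0]))) y):
	// one little-endian 16-bit load whose bytes the rotate then swaps.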
31856 b = mergePoint(b, x0, x1) 31857 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31858 v.reset(OpCopy) 31859 v.AddArg(v0) 31860 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31861 v1.AuxInt = j1 31862 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 31863 v2.AuxInt = 8 31864 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31865 v3.AuxInt = i0 31866 v3.Aux = s 31867 v3.AddArg(p) 31868 v3.AddArg(idx) 31869 v3.AddArg(mem) 31870 v2.AddArg(v3) 31871 v1.AddArg(v2) 31872 v0.AddArg(v1) 31873 v0.AddArg(y) 31874 return true 31875 } 31876 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 31877 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31878 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 31879 for { 31880 _ = v.Args[1] 31881 s0 := v.Args[0] 31882 if s0.Op != OpAMD64SHLQconst { 31883 break 31884 } 31885 j0 := s0.AuxInt 31886 x0 := s0.Args[0] 31887 if x0.Op != OpAMD64MOVBloadidx1 { 31888 break 31889 } 31890 i0 := x0.AuxInt 31891 s := x0.Aux 31892 _ = x0.Args[2] 31893 p := x0.Args[0] 31894 idx := x0.Args[1] 31895 mem := x0.Args[2] 31896 or := v.Args[1] 31897 if or.Op != OpAMD64ORQ { 31898 break 31899 } 31900 _ = or.Args[1] 31901 y := or.Args[0] 31902 s1 := or.Args[1] 31903 if s1.Op != OpAMD64SHLQconst { 31904 break 31905 } 31906 j1 := s1.AuxInt 31907 x1 := s1.Args[0] 31908 if x1.Op != OpAMD64MOVBloadidx1 { 31909 break 31910 } 31911 i1 := x1.AuxInt 31912 if x1.Aux != s { 31913 break 31914 } 31915 _ = x1.Args[2] 31916 if p != x1.Args[0] { 31917 break 31918 } 31919 if idx != x1.Args[1] { 31920 break 31921 } 31922 if mem != x1.Args[2] { 31923 break 31924 } 31925 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31926 break 31927 } 31928 b = mergePoint(b, x0, x1) 31929 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 31930 v.reset(OpCopy) 31931 v.AddArg(v0) 31932 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 31933 v1.AuxInt = j1 31934 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 31935 v2.AuxInt = 8 31936 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 31937 v3.AuxInt = i0 31938 v3.Aux = s 31939 v3.AddArg(p) 31940 v3.AddArg(idx) 31941 v3.AddArg(mem) 31942 v2.AddArg(v3) 31943 v1.AddArg(v2) 31944 v0.AddArg(v1) 31945 v0.AddArg(y) 31946 return true 31947 } 31948 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 31949 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 31950 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 31951 for { 31952 _ = v.Args[1] 31953 s0 := v.Args[0] 31954 if s0.Op != OpAMD64SHLQconst { 31955 break 31956 } 31957 j0 := s0.AuxInt 31958 x0 := s0.Args[0] 31959 if x0.Op != OpAMD64MOVBloadidx1 { 31960 break 31961 } 31962 i0 := x0.AuxInt 31963 s := 
x0.Aux 31964 _ = x0.Args[2] 31965 idx := x0.Args[0] 31966 p := x0.Args[1] 31967 mem := x0.Args[2] 31968 or := v.Args[1] 31969 if or.Op != OpAMD64ORQ { 31970 break 31971 } 31972 _ = or.Args[1] 31973 y := or.Args[0] 31974 s1 := or.Args[1] 31975 if s1.Op != OpAMD64SHLQconst { 31976 break 31977 } 31978 j1 := s1.AuxInt 31979 x1 := s1.Args[0] 31980 if x1.Op != OpAMD64MOVBloadidx1 { 31981 break 31982 } 31983 i1 := x1.AuxInt 31984 if x1.Aux != s { 31985 break 31986 } 31987 _ = x1.Args[2] 31988 if p != x1.Args[0] { 31989 break 31990 } 31991 if idx != x1.Args[1] { 31992 break 31993 } 31994 if mem != x1.Args[2] { 31995 break 31996 } 31997 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 31998 break 31999 } 32000 b = mergePoint(b, x0, x1) 32001 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32002 v.reset(OpCopy) 32003 v.AddArg(v0) 32004 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32005 v1.AuxInt = j1 32006 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32007 v2.AuxInt = 8 32008 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32009 v3.AuxInt = i0 32010 v3.Aux = s 32011 v3.AddArg(p) 32012 v3.AddArg(idx) 32013 v3.AddArg(mem) 32014 v2.AddArg(v3) 32015 v1.AddArg(v2) 32016 v0.AddArg(v1) 32017 v0.AddArg(y) 32018 return true 32019 } 32020 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 32021 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32022 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32023 for { 32024 _ = v.Args[1] 32025 s0 := v.Args[0] 32026 if s0.Op != OpAMD64SHLQconst { 32027 break 32028 } 32029 j0 := s0.AuxInt 32030 x0 := s0.Args[0] 32031 if x0.Op != OpAMD64MOVBloadidx1 { 32032 break 32033 } 32034 i0 := x0.AuxInt 32035 s := x0.Aux 32036 _ = x0.Args[2] 32037 p := x0.Args[0] 32038 idx := x0.Args[1] 32039 mem := x0.Args[2] 32040 or := v.Args[1] 32041 if or.Op != OpAMD64ORQ { 32042 break 32043 } 32044 _ = or.Args[1] 32045 y := or.Args[0] 32046 s1 := or.Args[1] 32047 if s1.Op != OpAMD64SHLQconst { 32048 break 32049 } 32050 j1 := s1.AuxInt 32051 x1 := s1.Args[0] 32052 if x1.Op != OpAMD64MOVBloadidx1 { 32053 break 32054 } 32055 i1 := x1.AuxInt 32056 if x1.Aux != s { 32057 break 32058 } 32059 _ = x1.Args[2] 32060 if idx != x1.Args[0] { 32061 break 32062 } 32063 if p != x1.Args[1] { 32064 break 32065 } 32066 if mem != x1.Args[2] { 32067 break 32068 } 32069 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32070 break 32071 } 32072 b = mergePoint(b, x0, x1) 32073 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32074 v.reset(OpCopy) 32075 v.AddArg(v0) 32076 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32077 v1.AuxInt = j1 32078 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32079 v2.AuxInt = 8 32080 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32081 v3.AuxInt = i0 32082 v3.Aux = s 32083 v3.AddArg(p) 32084 v3.AddArg(idx) 32085 
v3.AddArg(mem) 32086 v2.AddArg(v3) 32087 v1.AddArg(v2) 32088 v0.AddArg(v1) 32089 v0.AddArg(y) 32090 return true 32091 } 32092 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 32093 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32094 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32095 for { 32096 _ = v.Args[1] 32097 s0 := v.Args[0] 32098 if s0.Op != OpAMD64SHLQconst { 32099 break 32100 } 32101 j0 := s0.AuxInt 32102 x0 := s0.Args[0] 32103 if x0.Op != OpAMD64MOVBloadidx1 { 32104 break 32105 } 32106 i0 := x0.AuxInt 32107 s := x0.Aux 32108 _ = x0.Args[2] 32109 idx := x0.Args[0] 32110 p := x0.Args[1] 32111 mem := x0.Args[2] 32112 or := v.Args[1] 32113 if or.Op != OpAMD64ORQ { 32114 break 32115 } 32116 _ = or.Args[1] 32117 y := or.Args[0] 32118 s1 := or.Args[1] 32119 if s1.Op != OpAMD64SHLQconst { 32120 break 32121 } 32122 j1 := s1.AuxInt 32123 x1 := s1.Args[0] 32124 if x1.Op != OpAMD64MOVBloadidx1 { 32125 break 32126 } 32127 i1 := x1.AuxInt 32128 if x1.Aux != s { 32129 break 32130 } 32131 _ = x1.Args[2] 32132 if idx != x1.Args[0] { 32133 break 32134 } 32135 if p != x1.Args[1] { 32136 break 32137 } 32138 if mem != x1.Args[2] { 32139 break 32140 } 32141 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32142 break 32143 } 32144 b = mergePoint(b, x0, x1) 32145 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32146 v.reset(OpCopy) 32147 v.AddArg(v0) 32148 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32149 v1.AuxInt = j1 32150 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32151 v2.AuxInt = 8 32152 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32153 v3.AuxInt = i0 32154 v3.Aux = s 32155 v3.AddArg(p) 32156 v3.AddArg(idx) 32157 v3.AddArg(mem) 32158 v2.AddArg(v3) 32159 v1.AddArg(v2) 32160 v0.AddArg(v1) 32161 v0.AddArg(y) 32162 return true 32163 } 32164 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 32165 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32166 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32167 for { 32168 _ = v.Args[1] 32169 or := v.Args[0] 32170 if or.Op != OpAMD64ORQ { 32171 break 32172 } 32173 _ = or.Args[1] 32174 s1 := or.Args[0] 32175 if s1.Op != OpAMD64SHLQconst { 32176 break 32177 } 32178 j1 := s1.AuxInt 32179 x1 := s1.Args[0] 32180 if x1.Op != OpAMD64MOVBloadidx1 { 32181 break 32182 } 32183 i1 := x1.AuxInt 32184 s := x1.Aux 32185 _ = x1.Args[2] 32186 p := x1.Args[0] 32187 idx := x1.Args[1] 32188 mem := x1.Args[2] 32189 y := or.Args[1] 32190 s0 := v.Args[1] 32191 if s0.Op != OpAMD64SHLQconst { 32192 break 32193 } 32194 j0 := s0.AuxInt 32195 x0 := s0.Args[0] 32196 if x0.Op != OpAMD64MOVBloadidx1 { 32197 break 32198 } 32199 i0 := x0.AuxInt 32200 if 
x0.Aux != s { 32201 break 32202 } 32203 _ = x0.Args[2] 32204 if p != x0.Args[0] { 32205 break 32206 } 32207 if idx != x0.Args[1] { 32208 break 32209 } 32210 if mem != x0.Args[2] { 32211 break 32212 } 32213 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32214 break 32215 } 32216 b = mergePoint(b, x0, x1) 32217 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32218 v.reset(OpCopy) 32219 v.AddArg(v0) 32220 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32221 v1.AuxInt = j1 32222 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32223 v2.AuxInt = 8 32224 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32225 v3.AuxInt = i0 32226 v3.Aux = s 32227 v3.AddArg(p) 32228 v3.AddArg(idx) 32229 v3.AddArg(mem) 32230 v2.AddArg(v3) 32231 v1.AddArg(v2) 32232 v0.AddArg(v1) 32233 v0.AddArg(y) 32234 return true 32235 } 32236 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 32237 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32238 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32239 for { 32240 _ = v.Args[1] 32241 or := v.Args[0] 32242 if or.Op != OpAMD64ORQ { 32243 break 32244 } 32245 _ = or.Args[1] 32246 s1 := or.Args[0] 32247 if s1.Op != OpAMD64SHLQconst { 32248 break 32249 } 32250 j1 := s1.AuxInt 32251 x1 := s1.Args[0] 32252 if x1.Op != OpAMD64MOVBloadidx1 { 32253 break 32254 } 32255 i1 := x1.AuxInt 32256 s := x1.Aux 32257 _ = x1.Args[2] 32258 idx := x1.Args[0] 32259 p := x1.Args[1] 32260 mem := x1.Args[2] 32261 y := or.Args[1] 32262 s0 := v.Args[1] 32263 if s0.Op != OpAMD64SHLQconst { 32264 break 32265 } 32266 j0 := s0.AuxInt 32267 x0 := s0.Args[0] 32268 if x0.Op != OpAMD64MOVBloadidx1 { 32269 break 32270 } 32271 i0 := x0.AuxInt 32272 if x0.Aux != s { 32273 break 32274 } 32275 _ = x0.Args[2] 32276 if p != x0.Args[0] { 32277 break 32278 } 32279 if idx != x0.Args[1] { 32280 break 32281 } 32282 if mem != x0.Args[2] { 32283 break 32284 } 32285 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32286 break 32287 } 32288 b = mergePoint(b, x0, x1) 32289 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32290 v.reset(OpCopy) 32291 v.AddArg(v0) 32292 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32293 v1.AuxInt = j1 32294 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32295 v2.AuxInt = 8 32296 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32297 v3.AuxInt = i0 32298 v3.Aux = s 32299 v3.AddArg(p) 32300 v3.AddArg(idx) 32301 v3.AddArg(mem) 32302 v2.AddArg(v3) 32303 v1.AddArg(v2) 32304 v0.AddArg(v1) 32305 v0.AddArg(y) 32306 return true 32307 } 32308 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 32309 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && 
clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32310 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32311 for { 32312 _ = v.Args[1] 32313 or := v.Args[0] 32314 if or.Op != OpAMD64ORQ { 32315 break 32316 } 32317 _ = or.Args[1] 32318 y := or.Args[0] 32319 s1 := or.Args[1] 32320 if s1.Op != OpAMD64SHLQconst { 32321 break 32322 } 32323 j1 := s1.AuxInt 32324 x1 := s1.Args[0] 32325 if x1.Op != OpAMD64MOVBloadidx1 { 32326 break 32327 } 32328 i1 := x1.AuxInt 32329 s := x1.Aux 32330 _ = x1.Args[2] 32331 p := x1.Args[0] 32332 idx := x1.Args[1] 32333 mem := x1.Args[2] 32334 s0 := v.Args[1] 32335 if s0.Op != OpAMD64SHLQconst { 32336 break 32337 } 32338 j0 := s0.AuxInt 32339 x0 := s0.Args[0] 32340 if x0.Op != OpAMD64MOVBloadidx1 { 32341 break 32342 } 32343 i0 := x0.AuxInt 32344 if x0.Aux != s { 32345 break 32346 } 32347 _ = x0.Args[2] 32348 if p != x0.Args[0] { 32349 break 32350 } 32351 if idx != x0.Args[1] { 32352 break 32353 } 32354 if mem != x0.Args[2] { 32355 break 32356 } 32357 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32358 break 32359 } 32360 b = mergePoint(b, x0, x1) 32361 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32362 v.reset(OpCopy) 32363 v.AddArg(v0) 32364 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32365 v1.AuxInt = j1 32366 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32367 v2.AuxInt = 8 32368 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32369 v3.AuxInt = i0 32370 v3.Aux = s 32371 v3.AddArg(p) 32372 v3.AddArg(idx) 32373 v3.AddArg(mem) 32374 v2.AddArg(v3) 32375 v1.AddArg(v2) 32376 v0.AddArg(v1) 32377 v0.AddArg(y) 32378 return true 32379 } 32380 return false 32381 } 32382 func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool { 32383 b := v.Block 32384 _ = b 32385 typ := &b.Func.Config.Types 32386 _ = typ 32387 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 32388 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32389 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32390 for { 32391 _ = v.Args[1] 32392 or := v.Args[0] 32393 if or.Op != OpAMD64ORQ { 32394 break 32395 } 32396 _ = or.Args[1] 32397 y := or.Args[0] 32398 s1 := or.Args[1] 32399 if s1.Op != OpAMD64SHLQconst { 32400 break 32401 } 32402 j1 := s1.AuxInt 32403 x1 := s1.Args[0] 32404 if x1.Op != OpAMD64MOVBloadidx1 { 32405 break 32406 } 32407 i1 := x1.AuxInt 32408 s := x1.Aux 32409 _ = x1.Args[2] 32410 idx := x1.Args[0] 32411 p := x1.Args[1] 32412 mem := x1.Args[2] 32413 s0 := v.Args[1] 32414 if s0.Op != OpAMD64SHLQconst { 32415 break 32416 } 32417 j0 := s0.AuxInt 32418 x0 := s0.Args[0] 32419 if x0.Op != OpAMD64MOVBloadidx1 { 32420 break 32421 } 32422 i0 := x0.AuxInt 32423 if x0.Aux != s { 32424 break 32425 } 32426 _ = x0.Args[2] 32427 if p != x0.Args[0] { 32428 break 32429 } 32430 if idx != x0.Args[1] { 32431 break 32432 } 32433 if mem != x0.Args[2] { 32434 break 32435 } 32436 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32437 break 32438 } 32439 b = mergePoint(b, x0, x1) 32440 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32441 v.reset(OpCopy) 32442 v.AddArg(v0) 32443 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32444 v1.AuxInt = j1 32445 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32446 v2.AuxInt = 8 32447 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32448 v3.AuxInt = i0 32449 v3.Aux = s 32450 v3.AddArg(p) 32451 v3.AddArg(idx) 32452 v3.AddArg(mem) 32453 v2.AddArg(v3) 32454 v1.AddArg(v2) 32455 v0.AddArg(v1) 32456 v0.AddArg(y) 32457 return true 32458 } 32459 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 32460 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32461 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32462 for { 32463 _ = v.Args[1] 32464 or := v.Args[0] 32465 if or.Op != OpAMD64ORQ { 32466 break 32467 } 32468 _ = or.Args[1] 32469 s1 := or.Args[0] 32470 if s1.Op != OpAMD64SHLQconst { 32471 break 32472 } 32473 j1 := s1.AuxInt 32474 x1 := s1.Args[0] 32475 if x1.Op != OpAMD64MOVBloadidx1 { 32476 break 32477 } 32478 i1 := x1.AuxInt 32479 s := x1.Aux 32480 _ = x1.Args[2] 32481 p := x1.Args[0] 32482 idx := x1.Args[1] 32483 mem := x1.Args[2] 32484 y := or.Args[1] 32485 s0 := v.Args[1] 32486 if s0.Op != OpAMD64SHLQconst { 32487 break 32488 } 32489 j0 := s0.AuxInt 32490 x0 := s0.Args[0] 32491 if x0.Op != OpAMD64MOVBloadidx1 { 32492 break 32493 } 32494 i0 := x0.AuxInt 32495 if x0.Aux != s { 32496 break 32497 } 32498 _ = x0.Args[2] 32499 if idx != x0.Args[0] { 32500 break 32501 } 32502 if p != x0.Args[1] { 32503 break 32504 } 32505 if mem != x0.Args[2] { 32506 break 32507 } 32508 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32509 break 32510 } 32511 b = mergePoint(b, x0, x1) 32512 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32513 v.reset(OpCopy) 32514 v.AddArg(v0) 32515 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32516 v1.AuxInt = j1 32517 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32518 v2.AuxInt = 8 32519 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32520 v3.AuxInt = i0 32521 v3.Aux = s 32522 v3.AddArg(p) 32523 v3.AddArg(idx) 32524 v3.AddArg(mem) 32525 v2.AddArg(v3) 32526 v1.AddArg(v2) 32527 v0.AddArg(v1) 32528 v0.AddArg(y) 32529 return true 32530 } 32531 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 32532 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32533 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32534 for { 32535 _ = v.Args[1] 32536 or := v.Args[0] 
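	// Editorial note, not part of the generated output: this matcher and its
	// neighbors differ only in operand order. ORQ is commutative and
	// scale-1 indexed addressing is symmetric in p and idx, so the rule
	// generator (gen/AMD64.rules) emits one hand-unrolled matcher per
	// permutation; each checks the argument shapes in sequence, as below.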
32537 if or.Op != OpAMD64ORQ { 32538 break 32539 } 32540 _ = or.Args[1] 32541 s1 := or.Args[0] 32542 if s1.Op != OpAMD64SHLQconst { 32543 break 32544 } 32545 j1 := s1.AuxInt 32546 x1 := s1.Args[0] 32547 if x1.Op != OpAMD64MOVBloadidx1 { 32548 break 32549 } 32550 i1 := x1.AuxInt 32551 s := x1.Aux 32552 _ = x1.Args[2] 32553 idx := x1.Args[0] 32554 p := x1.Args[1] 32555 mem := x1.Args[2] 32556 y := or.Args[1] 32557 s0 := v.Args[1] 32558 if s0.Op != OpAMD64SHLQconst { 32559 break 32560 } 32561 j0 := s0.AuxInt 32562 x0 := s0.Args[0] 32563 if x0.Op != OpAMD64MOVBloadidx1 { 32564 break 32565 } 32566 i0 := x0.AuxInt 32567 if x0.Aux != s { 32568 break 32569 } 32570 _ = x0.Args[2] 32571 if idx != x0.Args[0] { 32572 break 32573 } 32574 if p != x0.Args[1] { 32575 break 32576 } 32577 if mem != x0.Args[2] { 32578 break 32579 } 32580 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32581 break 32582 } 32583 b = mergePoint(b, x0, x1) 32584 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32585 v.reset(OpCopy) 32586 v.AddArg(v0) 32587 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32588 v1.AuxInt = j1 32589 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32590 v2.AuxInt = 8 32591 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32592 v3.AuxInt = i0 32593 v3.Aux = s 32594 v3.AddArg(p) 32595 v3.AddArg(idx) 32596 v3.AddArg(mem) 32597 v2.AddArg(v3) 32598 v1.AddArg(v2) 32599 v0.AddArg(v1) 32600 v0.AddArg(y) 32601 return true 32602 } 32603 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 32604 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32605 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32606 for { 32607 _ = v.Args[1] 32608 or := v.Args[0] 32609 if or.Op != OpAMD64ORQ { 32610 break 32611 } 32612 _ = or.Args[1] 32613 y := or.Args[0] 32614 s1 := or.Args[1] 32615 if s1.Op != OpAMD64SHLQconst { 32616 break 32617 } 32618 j1 := s1.AuxInt 32619 x1 := s1.Args[0] 32620 if x1.Op != OpAMD64MOVBloadidx1 { 32621 break 32622 } 32623 i1 := x1.AuxInt 32624 s := x1.Aux 32625 _ = x1.Args[2] 32626 p := x1.Args[0] 32627 idx := x1.Args[1] 32628 mem := x1.Args[2] 32629 s0 := v.Args[1] 32630 if s0.Op != OpAMD64SHLQconst { 32631 break 32632 } 32633 j0 := s0.AuxInt 32634 x0 := s0.Args[0] 32635 if x0.Op != OpAMD64MOVBloadidx1 { 32636 break 32637 } 32638 i0 := x0.AuxInt 32639 if x0.Aux != s { 32640 break 32641 } 32642 _ = x0.Args[2] 32643 if idx != x0.Args[0] { 32644 break 32645 } 32646 if p != x0.Args[1] { 32647 break 32648 } 32649 if mem != x0.Args[2] { 32650 break 32651 } 32652 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32653 break 32654 } 32655 b = mergePoint(b, x0, x1) 32656 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32657 v.reset(OpCopy) 32658 v.AddArg(v0) 32659 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32660 v1.AuxInt = j1 32661 v2 := b.NewValue0(v.Pos, 
OpAMD64ROLWconst, typ.UInt16) 32662 v2.AuxInt = 8 32663 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32664 v3.AuxInt = i0 32665 v3.Aux = s 32666 v3.AddArg(p) 32667 v3.AddArg(idx) 32668 v3.AddArg(mem) 32669 v2.AddArg(v3) 32670 v1.AddArg(v2) 32671 v0.AddArg(v1) 32672 v0.AddArg(y) 32673 return true 32674 } 32675 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 32676 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 32677 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 32678 for { 32679 _ = v.Args[1] 32680 or := v.Args[0] 32681 if or.Op != OpAMD64ORQ { 32682 break 32683 } 32684 _ = or.Args[1] 32685 y := or.Args[0] 32686 s1 := or.Args[1] 32687 if s1.Op != OpAMD64SHLQconst { 32688 break 32689 } 32690 j1 := s1.AuxInt 32691 x1 := s1.Args[0] 32692 if x1.Op != OpAMD64MOVBloadidx1 { 32693 break 32694 } 32695 i1 := x1.AuxInt 32696 s := x1.Aux 32697 _ = x1.Args[2] 32698 idx := x1.Args[0] 32699 p := x1.Args[1] 32700 mem := x1.Args[2] 32701 s0 := v.Args[1] 32702 if s0.Op != OpAMD64SHLQconst { 32703 break 32704 } 32705 j0 := s0.AuxInt 32706 x0 := s0.Args[0] 32707 if x0.Op != OpAMD64MOVBloadidx1 { 32708 break 32709 } 32710 i0 := x0.AuxInt 32711 if x0.Aux != s { 32712 break 32713 } 32714 _ = x0.Args[2] 32715 if idx != x0.Args[0] { 32716 break 32717 } 32718 if p != x0.Args[1] { 32719 break 32720 } 32721 if mem != x0.Args[2] { 32722 break 32723 } 32724 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 32725 break 32726 } 32727 b = mergePoint(b, x0, x1) 32728 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32729 v.reset(OpCopy) 32730 v.AddArg(v0) 32731 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32732 v1.AuxInt = j1 32733 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 32734 v2.AuxInt = 8 32735 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 32736 v3.AuxInt = i0 32737 v3.Aux = s 32738 v3.AddArg(p) 32739 v3.AddArg(idx) 32740 v3.AddArg(mem) 32741 v2.AddArg(v3) 32742 v1.AddArg(v2) 32743 v0.AddArg(v1) 32744 v0.AddArg(y) 32745 return true 32746 } 32747 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 32748 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32749 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32750 for { 32751 _ = v.Args[1] 32752 s0 := v.Args[0] 32753 if s0.Op != OpAMD64SHLQconst { 32754 break 32755 } 32756 j0 := s0.AuxInt 32757 r0 := s0.Args[0] 32758 if r0.Op != OpAMD64ROLWconst { 32759 break 32760 } 32761 if r0.AuxInt != 8 { 32762 break 32763 } 32764 x0 := r0.Args[0] 32765 if x0.Op != OpAMD64MOVWloadidx1 { 32766 break 32767 } 32768 i0 := x0.AuxInt 32769 s := x0.Aux 32770 
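// The "_ = x0.Args[2]" idiom emitted before the positional reads below is
// a generated safety hint: touching the highest argument index first
// asserts the argument count once, which also lets the compiler drop the
// bounds checks on the following Args[0]/Args[1]/Args[2] accesses.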
_ = x0.Args[2] 32771 p := x0.Args[0] 32772 idx := x0.Args[1] 32773 mem := x0.Args[2] 32774 or := v.Args[1] 32775 if or.Op != OpAMD64ORQ { 32776 break 32777 } 32778 _ = or.Args[1] 32779 s1 := or.Args[0] 32780 if s1.Op != OpAMD64SHLQconst { 32781 break 32782 } 32783 j1 := s1.AuxInt 32784 r1 := s1.Args[0] 32785 if r1.Op != OpAMD64ROLWconst { 32786 break 32787 } 32788 if r1.AuxInt != 8 { 32789 break 32790 } 32791 x1 := r1.Args[0] 32792 if x1.Op != OpAMD64MOVWloadidx1 { 32793 break 32794 } 32795 i1 := x1.AuxInt 32796 if x1.Aux != s { 32797 break 32798 } 32799 _ = x1.Args[2] 32800 if p != x1.Args[0] { 32801 break 32802 } 32803 if idx != x1.Args[1] { 32804 break 32805 } 32806 if mem != x1.Args[2] { 32807 break 32808 } 32809 y := or.Args[1] 32810 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32811 break 32812 } 32813 b = mergePoint(b, x0, x1) 32814 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32815 v.reset(OpCopy) 32816 v.AddArg(v0) 32817 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32818 v1.AuxInt = j1 32819 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32820 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32821 v3.AuxInt = i0 32822 v3.Aux = s 32823 v3.AddArg(p) 32824 v3.AddArg(idx) 32825 v3.AddArg(mem) 32826 v2.AddArg(v3) 32827 v1.AddArg(v2) 32828 v0.AddArg(v1) 32829 v0.AddArg(y) 32830 return true 32831 } 32832 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 32833 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32834 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32835 for { 32836 _ = v.Args[1] 32837 s0 := v.Args[0] 32838 if s0.Op != OpAMD64SHLQconst { 32839 break 32840 } 32841 j0 := s0.AuxInt 32842 r0 := s0.Args[0] 32843 if r0.Op != OpAMD64ROLWconst { 32844 break 32845 } 32846 if r0.AuxInt != 8 { 32847 break 32848 } 32849 x0 := r0.Args[0] 32850 if x0.Op != OpAMD64MOVWloadidx1 { 32851 break 32852 } 32853 i0 := x0.AuxInt 32854 s := x0.Aux 32855 _ = x0.Args[2] 32856 idx := x0.Args[0] 32857 p := x0.Args[1] 32858 mem := x0.Args[2] 32859 or := v.Args[1] 32860 if or.Op != OpAMD64ORQ { 32861 break 32862 } 32863 _ = or.Args[1] 32864 s1 := or.Args[0] 32865 if s1.Op != OpAMD64SHLQconst { 32866 break 32867 } 32868 j1 := s1.AuxInt 32869 r1 := s1.Args[0] 32870 if r1.Op != OpAMD64ROLWconst { 32871 break 32872 } 32873 if r1.AuxInt != 8 { 32874 break 32875 } 32876 x1 := r1.Args[0] 32877 if x1.Op != OpAMD64MOVWloadidx1 { 32878 break 32879 } 32880 i1 := x1.AuxInt 32881 if x1.Aux != s { 32882 break 32883 } 32884 _ = x1.Args[2] 32885 if p != x1.Args[0] { 32886 break 32887 } 32888 if idx != x1.Args[1] { 32889 break 32890 } 32891 if mem != x1.Args[2] { 32892 break 32893 } 32894 y := or.Args[1] 32895 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && 
clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32896 break 32897 } 32898 b = mergePoint(b, x0, x1) 32899 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32900 v.reset(OpCopy) 32901 v.AddArg(v0) 32902 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32903 v1.AuxInt = j1 32904 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32905 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32906 v3.AuxInt = i0 32907 v3.Aux = s 32908 v3.AddArg(p) 32909 v3.AddArg(idx) 32910 v3.AddArg(mem) 32911 v2.AddArg(v3) 32912 v1.AddArg(v2) 32913 v0.AddArg(v1) 32914 v0.AddArg(y) 32915 return true 32916 } 32917 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 32918 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32919 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32920 for { 32921 _ = v.Args[1] 32922 s0 := v.Args[0] 32923 if s0.Op != OpAMD64SHLQconst { 32924 break 32925 } 32926 j0 := s0.AuxInt 32927 r0 := s0.Args[0] 32928 if r0.Op != OpAMD64ROLWconst { 32929 break 32930 } 32931 if r0.AuxInt != 8 { 32932 break 32933 } 32934 x0 := r0.Args[0] 32935 if x0.Op != OpAMD64MOVWloadidx1 { 32936 break 32937 } 32938 i0 := x0.AuxInt 32939 s := x0.Aux 32940 _ = x0.Args[2] 32941 p := x0.Args[0] 32942 idx := x0.Args[1] 32943 mem := x0.Args[2] 32944 or := v.Args[1] 32945 if or.Op != OpAMD64ORQ { 32946 break 32947 } 32948 _ = or.Args[1] 32949 s1 := or.Args[0] 32950 if s1.Op != OpAMD64SHLQconst { 32951 break 32952 } 32953 j1 := s1.AuxInt 32954 r1 := s1.Args[0] 32955 if r1.Op != OpAMD64ROLWconst { 32956 break 32957 } 32958 if r1.AuxInt != 8 { 32959 break 32960 } 32961 x1 := r1.Args[0] 32962 if x1.Op != OpAMD64MOVWloadidx1 { 32963 break 32964 } 32965 i1 := x1.AuxInt 32966 if x1.Aux != s { 32967 break 32968 } 32969 _ = x1.Args[2] 32970 if idx != x1.Args[0] { 32971 break 32972 } 32973 if p != x1.Args[1] { 32974 break 32975 } 32976 if mem != x1.Args[2] { 32977 break 32978 } 32979 y := or.Args[1] 32980 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32981 break 32982 } 32983 b = mergePoint(b, x0, x1) 32984 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32985 v.reset(OpCopy) 32986 v.AddArg(v0) 32987 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32988 v1.AuxInt = j1 32989 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32990 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32991 v3.AuxInt = i0 32992 v3.Aux = s 32993 v3.AddArg(p) 32994 v3.AddArg(idx) 32995 v3.AddArg(mem) 32996 v2.AddArg(v3) 32997 v1.AddArg(v2) 32998 v0.AddArg(v1) 32999 v0.AddArg(y) 33000 return true 33001 } 33002 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 33003 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && 
r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33004 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33005 for { 33006 _ = v.Args[1] 33007 s0 := v.Args[0] 33008 if s0.Op != OpAMD64SHLQconst { 33009 break 33010 } 33011 j0 := s0.AuxInt 33012 r0 := s0.Args[0] 33013 if r0.Op != OpAMD64ROLWconst { 33014 break 33015 } 33016 if r0.AuxInt != 8 { 33017 break 33018 } 33019 x0 := r0.Args[0] 33020 if x0.Op != OpAMD64MOVWloadidx1 { 33021 break 33022 } 33023 i0 := x0.AuxInt 33024 s := x0.Aux 33025 _ = x0.Args[2] 33026 idx := x0.Args[0] 33027 p := x0.Args[1] 33028 mem := x0.Args[2] 33029 or := v.Args[1] 33030 if or.Op != OpAMD64ORQ { 33031 break 33032 } 33033 _ = or.Args[1] 33034 s1 := or.Args[0] 33035 if s1.Op != OpAMD64SHLQconst { 33036 break 33037 } 33038 j1 := s1.AuxInt 33039 r1 := s1.Args[0] 33040 if r1.Op != OpAMD64ROLWconst { 33041 break 33042 } 33043 if r1.AuxInt != 8 { 33044 break 33045 } 33046 x1 := r1.Args[0] 33047 if x1.Op != OpAMD64MOVWloadidx1 { 33048 break 33049 } 33050 i1 := x1.AuxInt 33051 if x1.Aux != s { 33052 break 33053 } 33054 _ = x1.Args[2] 33055 if idx != x1.Args[0] { 33056 break 33057 } 33058 if p != x1.Args[1] { 33059 break 33060 } 33061 if mem != x1.Args[2] { 33062 break 33063 } 33064 y := or.Args[1] 33065 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33066 break 33067 } 33068 b = mergePoint(b, x0, x1) 33069 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33070 v.reset(OpCopy) 33071 v.AddArg(v0) 33072 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33073 v1.AuxInt = j1 33074 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33075 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33076 v3.AuxInt = i0 33077 v3.Aux = s 33078 v3.AddArg(p) 33079 v3.AddArg(idx) 33080 v3.AddArg(mem) 33081 v2.AddArg(v3) 33082 v1.AddArg(v2) 33083 v0.AddArg(v1) 33084 v0.AddArg(y) 33085 return true 33086 } 33087 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 33088 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33089 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33090 for { 33091 _ = v.Args[1] 33092 s0 := v.Args[0] 33093 if s0.Op != OpAMD64SHLQconst { 33094 break 33095 } 33096 j0 := s0.AuxInt 33097 r0 := s0.Args[0] 33098 if r0.Op != OpAMD64ROLWconst { 33099 break 33100 } 33101 if r0.AuxInt != 8 { 33102 break 33103 } 33104 x0 := r0.Args[0] 33105 if x0.Op != OpAMD64MOVWloadidx1 { 33106 break 33107 } 33108 i0 := x0.AuxInt 33109 s := x0.Aux 33110 _ = x0.Args[2] 33111 p := x0.Args[0] 33112 idx := x0.Args[1] 33113 mem := x0.Args[2] 33114 or := v.Args[1] 33115 if or.Op != OpAMD64ORQ { 33116 break 33117 } 33118 _ = or.Args[1] 33119 y := or.Args[0] 33120 s1 := or.Args[1] 
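// The side conditions tested further down encode when this fusion is
// safe: i1 == i0+2 makes the two 16-bit loads adjacent in memory,
// j1 == j0-16 with j1%32 == 0 makes their shifted copies adjacent and
// 32-bit aligned in the result, the Uses == 1 tests ensure nothing else
// still consumes the intermediates, and mergePoint must find a block
// dominating both loads where the fused load can live. As a worked
// instance, with j0 = 48 and j1 = 32:
//   bswap16(load16(p+i0))<<48 | bswap16(load16(p+i0+2))<<32
//     == bswap32(load32(p+i0)) << 32
// which is exactly the SHLQconst-of-BSWAPL replacement built below.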
33121 if s1.Op != OpAMD64SHLQconst { 33122 break 33123 } 33124 j1 := s1.AuxInt 33125 r1 := s1.Args[0] 33126 if r1.Op != OpAMD64ROLWconst { 33127 break 33128 } 33129 if r1.AuxInt != 8 { 33130 break 33131 } 33132 x1 := r1.Args[0] 33133 if x1.Op != OpAMD64MOVWloadidx1 { 33134 break 33135 } 33136 i1 := x1.AuxInt 33137 if x1.Aux != s { 33138 break 33139 } 33140 _ = x1.Args[2] 33141 if p != x1.Args[0] { 33142 break 33143 } 33144 if idx != x1.Args[1] { 33145 break 33146 } 33147 if mem != x1.Args[2] { 33148 break 33149 } 33150 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33151 break 33152 } 33153 b = mergePoint(b, x0, x1) 33154 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33155 v.reset(OpCopy) 33156 v.AddArg(v0) 33157 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33158 v1.AuxInt = j1 33159 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33160 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33161 v3.AuxInt = i0 33162 v3.Aux = s 33163 v3.AddArg(p) 33164 v3.AddArg(idx) 33165 v3.AddArg(mem) 33166 v2.AddArg(v3) 33167 v1.AddArg(v2) 33168 v0.AddArg(v1) 33169 v0.AddArg(y) 33170 return true 33171 } 33172 return false 33173 } 33174 func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { 33175 b := v.Block 33176 _ = b 33177 typ := &b.Func.Config.Types 33178 _ = typ 33179 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 33180 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33181 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33182 for { 33183 _ = v.Args[1] 33184 s0 := v.Args[0] 33185 if s0.Op != OpAMD64SHLQconst { 33186 break 33187 } 33188 j0 := s0.AuxInt 33189 r0 := s0.Args[0] 33190 if r0.Op != OpAMD64ROLWconst { 33191 break 33192 } 33193 if r0.AuxInt != 8 { 33194 break 33195 } 33196 x0 := r0.Args[0] 33197 if x0.Op != OpAMD64MOVWloadidx1 { 33198 break 33199 } 33200 i0 := x0.AuxInt 33201 s := x0.Aux 33202 _ = x0.Args[2] 33203 idx := x0.Args[0] 33204 p := x0.Args[1] 33205 mem := x0.Args[2] 33206 or := v.Args[1] 33207 if or.Op != OpAMD64ORQ { 33208 break 33209 } 33210 _ = or.Args[1] 33211 y := or.Args[0] 33212 s1 := or.Args[1] 33213 if s1.Op != OpAMD64SHLQconst { 33214 break 33215 } 33216 j1 := s1.AuxInt 33217 r1 := s1.Args[0] 33218 if r1.Op != OpAMD64ROLWconst { 33219 break 33220 } 33221 if r1.AuxInt != 8 { 33222 break 33223 } 33224 x1 := r1.Args[0] 33225 if x1.Op != OpAMD64MOVWloadidx1 { 33226 break 33227 } 33228 i1 := x1.AuxInt 33229 if x1.Aux != s { 33230 break 33231 } 33232 _ = x1.Args[2] 33233 if p != x1.Args[0] { 33234 break 33235 } 33236 if idx != x1.Args[1] { 33237 break 33238 } 33239 if mem != x1.Args[2] { 33240 break 33241 } 33242 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && 
clobber(s0) && clobber(s1) && clobber(or)) { 33243 break 33244 } 33245 b = mergePoint(b, x0, x1) 33246 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33247 v.reset(OpCopy) 33248 v.AddArg(v0) 33249 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33250 v1.AuxInt = j1 33251 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33252 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33253 v3.AuxInt = i0 33254 v3.Aux = s 33255 v3.AddArg(p) 33256 v3.AddArg(idx) 33257 v3.AddArg(mem) 33258 v2.AddArg(v3) 33259 v1.AddArg(v2) 33260 v0.AddArg(v1) 33261 v0.AddArg(y) 33262 return true 33263 } 33264 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 33265 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33266 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33267 for { 33268 _ = v.Args[1] 33269 s0 := v.Args[0] 33270 if s0.Op != OpAMD64SHLQconst { 33271 break 33272 } 33273 j0 := s0.AuxInt 33274 r0 := s0.Args[0] 33275 if r0.Op != OpAMD64ROLWconst { 33276 break 33277 } 33278 if r0.AuxInt != 8 { 33279 break 33280 } 33281 x0 := r0.Args[0] 33282 if x0.Op != OpAMD64MOVWloadidx1 { 33283 break 33284 } 33285 i0 := x0.AuxInt 33286 s := x0.Aux 33287 _ = x0.Args[2] 33288 p := x0.Args[0] 33289 idx := x0.Args[1] 33290 mem := x0.Args[2] 33291 or := v.Args[1] 33292 if or.Op != OpAMD64ORQ { 33293 break 33294 } 33295 _ = or.Args[1] 33296 y := or.Args[0] 33297 s1 := or.Args[1] 33298 if s1.Op != OpAMD64SHLQconst { 33299 break 33300 } 33301 j1 := s1.AuxInt 33302 r1 := s1.Args[0] 33303 if r1.Op != OpAMD64ROLWconst { 33304 break 33305 } 33306 if r1.AuxInt != 8 { 33307 break 33308 } 33309 x1 := r1.Args[0] 33310 if x1.Op != OpAMD64MOVWloadidx1 { 33311 break 33312 } 33313 i1 := x1.AuxInt 33314 if x1.Aux != s { 33315 break 33316 } 33317 _ = x1.Args[2] 33318 if idx != x1.Args[0] { 33319 break 33320 } 33321 if p != x1.Args[1] { 33322 break 33323 } 33324 if mem != x1.Args[2] { 33325 break 33326 } 33327 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33328 break 33329 } 33330 b = mergePoint(b, x0, x1) 33331 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33332 v.reset(OpCopy) 33333 v.AddArg(v0) 33334 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33335 v1.AuxInt = j1 33336 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33337 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33338 v3.AuxInt = i0 33339 v3.Aux = s 33340 v3.AddArg(p) 33341 v3.AddArg(idx) 33342 v3.AddArg(mem) 33343 v2.AddArg(v3) 33344 v1.AddArg(v2) 33345 v0.AddArg(v1) 33346 v0.AddArg(y) 33347 return true 33348 } 33349 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 33350 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 
&& mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33351 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33352 for { 33353 _ = v.Args[1] 33354 s0 := v.Args[0] 33355 if s0.Op != OpAMD64SHLQconst { 33356 break 33357 } 33358 j0 := s0.AuxInt 33359 r0 := s0.Args[0] 33360 if r0.Op != OpAMD64ROLWconst { 33361 break 33362 } 33363 if r0.AuxInt != 8 { 33364 break 33365 } 33366 x0 := r0.Args[0] 33367 if x0.Op != OpAMD64MOVWloadidx1 { 33368 break 33369 } 33370 i0 := x0.AuxInt 33371 s := x0.Aux 33372 _ = x0.Args[2] 33373 idx := x0.Args[0] 33374 p := x0.Args[1] 33375 mem := x0.Args[2] 33376 or := v.Args[1] 33377 if or.Op != OpAMD64ORQ { 33378 break 33379 } 33380 _ = or.Args[1] 33381 y := or.Args[0] 33382 s1 := or.Args[1] 33383 if s1.Op != OpAMD64SHLQconst { 33384 break 33385 } 33386 j1 := s1.AuxInt 33387 r1 := s1.Args[0] 33388 if r1.Op != OpAMD64ROLWconst { 33389 break 33390 } 33391 if r1.AuxInt != 8 { 33392 break 33393 } 33394 x1 := r1.Args[0] 33395 if x1.Op != OpAMD64MOVWloadidx1 { 33396 break 33397 } 33398 i1 := x1.AuxInt 33399 if x1.Aux != s { 33400 break 33401 } 33402 _ = x1.Args[2] 33403 if idx != x1.Args[0] { 33404 break 33405 } 33406 if p != x1.Args[1] { 33407 break 33408 } 33409 if mem != x1.Args[2] { 33410 break 33411 } 33412 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33413 break 33414 } 33415 b = mergePoint(b, x0, x1) 33416 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33417 v.reset(OpCopy) 33418 v.AddArg(v0) 33419 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33420 v1.AuxInt = j1 33421 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33422 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33423 v3.AuxInt = i0 33424 v3.Aux = s 33425 v3.AddArg(p) 33426 v3.AddArg(idx) 33427 v3.AddArg(mem) 33428 v2.AddArg(v3) 33429 v1.AddArg(v2) 33430 v0.AddArg(v1) 33431 v0.AddArg(y) 33432 return true 33433 } 33434 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33435 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33436 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33437 for { 33438 _ = v.Args[1] 33439 or := v.Args[0] 33440 if or.Op != OpAMD64ORQ { 33441 break 33442 } 33443 _ = or.Args[1] 33444 s1 := or.Args[0] 33445 if s1.Op != OpAMD64SHLQconst { 33446 break 33447 } 33448 j1 := s1.AuxInt 33449 r1 := s1.Args[0] 33450 if r1.Op != OpAMD64ROLWconst { 33451 break 33452 } 33453 if r1.AuxInt != 8 { 33454 break 33455 } 33456 x1 := r1.Args[0] 33457 if x1.Op != OpAMD64MOVWloadidx1 { 33458 break 33459 } 33460 i1 := x1.AuxInt 33461 s := x1.Aux 33462 _ = x1.Args[2] 33463 p := x1.Args[0] 33464 idx := x1.Args[1] 33465 mem := x1.Args[2] 33466 y := or.Args[1] 33467 s0 := v.Args[1] 33468 if s0.Op != OpAMD64SHLQconst { 33469 break 33470 } 33471 
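// Naming convention in these matchers: s* bind the SHLQconst shifts,
// r* the ROLWconst byte swaps, x* the narrow loads, i* their byte
// offsets, and j* their shift amounts; suffix 1 marks the piece at the
// lower shift position (the higher memory address) and 0 the other.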
j0 := s0.AuxInt 33472 r0 := s0.Args[0] 33473 if r0.Op != OpAMD64ROLWconst { 33474 break 33475 } 33476 if r0.AuxInt != 8 { 33477 break 33478 } 33479 x0 := r0.Args[0] 33480 if x0.Op != OpAMD64MOVWloadidx1 { 33481 break 33482 } 33483 i0 := x0.AuxInt 33484 if x0.Aux != s { 33485 break 33486 } 33487 _ = x0.Args[2] 33488 if p != x0.Args[0] { 33489 break 33490 } 33491 if idx != x0.Args[1] { 33492 break 33493 } 33494 if mem != x0.Args[2] { 33495 break 33496 } 33497 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33498 break 33499 } 33500 b = mergePoint(b, x0, x1) 33501 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33502 v.reset(OpCopy) 33503 v.AddArg(v0) 33504 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33505 v1.AuxInt = j1 33506 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33507 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33508 v3.AuxInt = i0 33509 v3.Aux = s 33510 v3.AddArg(p) 33511 v3.AddArg(idx) 33512 v3.AddArg(mem) 33513 v2.AddArg(v3) 33514 v1.AddArg(v2) 33515 v0.AddArg(v1) 33516 v0.AddArg(y) 33517 return true 33518 } 33519 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33520 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33521 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33522 for { 33523 _ = v.Args[1] 33524 or := v.Args[0] 33525 if or.Op != OpAMD64ORQ { 33526 break 33527 } 33528 _ = or.Args[1] 33529 s1 := or.Args[0] 33530 if s1.Op != OpAMD64SHLQconst { 33531 break 33532 } 33533 j1 := s1.AuxInt 33534 r1 := s1.Args[0] 33535 if r1.Op != OpAMD64ROLWconst { 33536 break 33537 } 33538 if r1.AuxInt != 8 { 33539 break 33540 } 33541 x1 := r1.Args[0] 33542 if x1.Op != OpAMD64MOVWloadidx1 { 33543 break 33544 } 33545 i1 := x1.AuxInt 33546 s := x1.Aux 33547 _ = x1.Args[2] 33548 idx := x1.Args[0] 33549 p := x1.Args[1] 33550 mem := x1.Args[2] 33551 y := or.Args[1] 33552 s0 := v.Args[1] 33553 if s0.Op != OpAMD64SHLQconst { 33554 break 33555 } 33556 j0 := s0.AuxInt 33557 r0 := s0.Args[0] 33558 if r0.Op != OpAMD64ROLWconst { 33559 break 33560 } 33561 if r0.AuxInt != 8 { 33562 break 33563 } 33564 x0 := r0.Args[0] 33565 if x0.Op != OpAMD64MOVWloadidx1 { 33566 break 33567 } 33568 i0 := x0.AuxInt 33569 if x0.Aux != s { 33570 break 33571 } 33572 _ = x0.Args[2] 33573 if p != x0.Args[0] { 33574 break 33575 } 33576 if idx != x0.Args[1] { 33577 break 33578 } 33579 if mem != x0.Args[2] { 33580 break 33581 } 33582 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33583 break 33584 } 33585 b = mergePoint(b, x0, x1) 33586 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33587 v.reset(OpCopy) 33588 v.AddArg(v0) 33589 v1 := b.NewValue0(v.Pos, 
OpAMD64SHLQconst, v.Type) 33590 v1.AuxInt = j1 33591 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33592 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33593 v3.AuxInt = i0 33594 v3.Aux = s 33595 v3.AddArg(p) 33596 v3.AddArg(idx) 33597 v3.AddArg(mem) 33598 v2.AddArg(v3) 33599 v1.AddArg(v2) 33600 v0.AddArg(v1) 33601 v0.AddArg(y) 33602 return true 33603 } 33604 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33605 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33606 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33607 for { 33608 _ = v.Args[1] 33609 or := v.Args[0] 33610 if or.Op != OpAMD64ORQ { 33611 break 33612 } 33613 _ = or.Args[1] 33614 y := or.Args[0] 33615 s1 := or.Args[1] 33616 if s1.Op != OpAMD64SHLQconst { 33617 break 33618 } 33619 j1 := s1.AuxInt 33620 r1 := s1.Args[0] 33621 if r1.Op != OpAMD64ROLWconst { 33622 break 33623 } 33624 if r1.AuxInt != 8 { 33625 break 33626 } 33627 x1 := r1.Args[0] 33628 if x1.Op != OpAMD64MOVWloadidx1 { 33629 break 33630 } 33631 i1 := x1.AuxInt 33632 s := x1.Aux 33633 _ = x1.Args[2] 33634 p := x1.Args[0] 33635 idx := x1.Args[1] 33636 mem := x1.Args[2] 33637 s0 := v.Args[1] 33638 if s0.Op != OpAMD64SHLQconst { 33639 break 33640 } 33641 j0 := s0.AuxInt 33642 r0 := s0.Args[0] 33643 if r0.Op != OpAMD64ROLWconst { 33644 break 33645 } 33646 if r0.AuxInt != 8 { 33647 break 33648 } 33649 x0 := r0.Args[0] 33650 if x0.Op != OpAMD64MOVWloadidx1 { 33651 break 33652 } 33653 i0 := x0.AuxInt 33654 if x0.Aux != s { 33655 break 33656 } 33657 _ = x0.Args[2] 33658 if p != x0.Args[0] { 33659 break 33660 } 33661 if idx != x0.Args[1] { 33662 break 33663 } 33664 if mem != x0.Args[2] { 33665 break 33666 } 33667 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33668 break 33669 } 33670 b = mergePoint(b, x0, x1) 33671 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33672 v.reset(OpCopy) 33673 v.AddArg(v0) 33674 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33675 v1.AuxInt = j1 33676 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33677 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33678 v3.AuxInt = i0 33679 v3.Aux = s 33680 v3.AddArg(p) 33681 v3.AddArg(idx) 33682 v3.AddArg(mem) 33683 v2.AddArg(v3) 33684 v1.AddArg(v2) 33685 v0.AddArg(v1) 33686 v0.AddArg(y) 33687 return true 33688 } 33689 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33690 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33691 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL 
<typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33692 for { 33693 _ = v.Args[1] 33694 or := v.Args[0] 33695 if or.Op != OpAMD64ORQ { 33696 break 33697 } 33698 _ = or.Args[1] 33699 y := or.Args[0] 33700 s1 := or.Args[1] 33701 if s1.Op != OpAMD64SHLQconst { 33702 break 33703 } 33704 j1 := s1.AuxInt 33705 r1 := s1.Args[0] 33706 if r1.Op != OpAMD64ROLWconst { 33707 break 33708 } 33709 if r1.AuxInt != 8 { 33710 break 33711 } 33712 x1 := r1.Args[0] 33713 if x1.Op != OpAMD64MOVWloadidx1 { 33714 break 33715 } 33716 i1 := x1.AuxInt 33717 s := x1.Aux 33718 _ = x1.Args[2] 33719 idx := x1.Args[0] 33720 p := x1.Args[1] 33721 mem := x1.Args[2] 33722 s0 := v.Args[1] 33723 if s0.Op != OpAMD64SHLQconst { 33724 break 33725 } 33726 j0 := s0.AuxInt 33727 r0 := s0.Args[0] 33728 if r0.Op != OpAMD64ROLWconst { 33729 break 33730 } 33731 if r0.AuxInt != 8 { 33732 break 33733 } 33734 x0 := r0.Args[0] 33735 if x0.Op != OpAMD64MOVWloadidx1 { 33736 break 33737 } 33738 i0 := x0.AuxInt 33739 if x0.Aux != s { 33740 break 33741 } 33742 _ = x0.Args[2] 33743 if p != x0.Args[0] { 33744 break 33745 } 33746 if idx != x0.Args[1] { 33747 break 33748 } 33749 if mem != x0.Args[2] { 33750 break 33751 } 33752 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33753 break 33754 } 33755 b = mergePoint(b, x0, x1) 33756 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33757 v.reset(OpCopy) 33758 v.AddArg(v0) 33759 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33760 v1.AuxInt = j1 33761 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33762 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33763 v3.AuxInt = i0 33764 v3.Aux = s 33765 v3.AddArg(p) 33766 v3.AddArg(idx) 33767 v3.AddArg(mem) 33768 v2.AddArg(v3) 33769 v1.AddArg(v2) 33770 v0.AddArg(v1) 33771 v0.AddArg(y) 33772 return true 33773 } 33774 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33775 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33776 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33777 for { 33778 _ = v.Args[1] 33779 or := v.Args[0] 33780 if or.Op != OpAMD64ORQ { 33781 break 33782 } 33783 _ = or.Args[1] 33784 s1 := or.Args[0] 33785 if s1.Op != OpAMD64SHLQconst { 33786 break 33787 } 33788 j1 := s1.AuxInt 33789 r1 := s1.Args[0] 33790 if r1.Op != OpAMD64ROLWconst { 33791 break 33792 } 33793 if r1.AuxInt != 8 { 33794 break 33795 } 33796 x1 := r1.Args[0] 33797 if x1.Op != OpAMD64MOVWloadidx1 { 33798 break 33799 } 33800 i1 := x1.AuxInt 33801 s := x1.Aux 33802 _ = x1.Args[2] 33803 p := x1.Args[0] 33804 idx := x1.Args[1] 33805 mem := x1.Args[2] 33806 y := or.Args[1] 33807 s0 := v.Args[1] 33808 if s0.Op != OpAMD64SHLQconst { 33809 break 33810 } 33811 j0 := s0.AuxInt 33812 r0 := s0.Args[0] 33813 if r0.Op != OpAMD64ROLWconst { 33814 break 33815 } 33816 if r0.AuxInt != 8 { 33817 break 33818 } 33819 x0 := r0.Args[0] 33820 if x0.Op != OpAMD64MOVWloadidx1 { 33821 break 
33822 } 33823 i0 := x0.AuxInt 33824 if x0.Aux != s { 33825 break 33826 } 33827 _ = x0.Args[2] 33828 if idx != x0.Args[0] { 33829 break 33830 } 33831 if p != x0.Args[1] { 33832 break 33833 } 33834 if mem != x0.Args[2] { 33835 break 33836 } 33837 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33838 break 33839 } 33840 b = mergePoint(b, x0, x1) 33841 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33842 v.reset(OpCopy) 33843 v.AddArg(v0) 33844 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33845 v1.AuxInt = j1 33846 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33847 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33848 v3.AuxInt = i0 33849 v3.Aux = s 33850 v3.AddArg(p) 33851 v3.AddArg(idx) 33852 v3.AddArg(mem) 33853 v2.AddArg(v3) 33854 v1.AddArg(v2) 33855 v0.AddArg(v1) 33856 v0.AddArg(y) 33857 return true 33858 } 33859 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33860 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33861 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33862 for { 33863 _ = v.Args[1] 33864 or := v.Args[0] 33865 if or.Op != OpAMD64ORQ { 33866 break 33867 } 33868 _ = or.Args[1] 33869 s1 := or.Args[0] 33870 if s1.Op != OpAMD64SHLQconst { 33871 break 33872 } 33873 j1 := s1.AuxInt 33874 r1 := s1.Args[0] 33875 if r1.Op != OpAMD64ROLWconst { 33876 break 33877 } 33878 if r1.AuxInt != 8 { 33879 break 33880 } 33881 x1 := r1.Args[0] 33882 if x1.Op != OpAMD64MOVWloadidx1 { 33883 break 33884 } 33885 i1 := x1.AuxInt 33886 s := x1.Aux 33887 _ = x1.Args[2] 33888 idx := x1.Args[0] 33889 p := x1.Args[1] 33890 mem := x1.Args[2] 33891 y := or.Args[1] 33892 s0 := v.Args[1] 33893 if s0.Op != OpAMD64SHLQconst { 33894 break 33895 } 33896 j0 := s0.AuxInt 33897 r0 := s0.Args[0] 33898 if r0.Op != OpAMD64ROLWconst { 33899 break 33900 } 33901 if r0.AuxInt != 8 { 33902 break 33903 } 33904 x0 := r0.Args[0] 33905 if x0.Op != OpAMD64MOVWloadidx1 { 33906 break 33907 } 33908 i0 := x0.AuxInt 33909 if x0.Aux != s { 33910 break 33911 } 33912 _ = x0.Args[2] 33913 if idx != x0.Args[0] { 33914 break 33915 } 33916 if p != x0.Args[1] { 33917 break 33918 } 33919 if mem != x0.Args[2] { 33920 break 33921 } 33922 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33923 break 33924 } 33925 b = mergePoint(b, x0, x1) 33926 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33927 v.reset(OpCopy) 33928 v.AddArg(v0) 33929 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33930 v1.AuxInt = j1 33931 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33932 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33933 v3.AuxInt = i0 33934 v3.Aux = s 33935 v3.AddArg(p) 
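// From v.reset(OpCopy) above, the matched value becomes a plain copy of
// v0, which is assembled bottom-up at the merge point: v3 is the fused
// 32-bit indexed load, v2 byte-swaps it with BSWAPL, v1 shifts it to bit
// position j1, and v0 ORs that shifted value with the untouched y.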
33936 v3.AddArg(idx) 33937 v3.AddArg(mem) 33938 v2.AddArg(v3) 33939 v1.AddArg(v2) 33940 v0.AddArg(v1) 33941 v0.AddArg(y) 33942 return true 33943 } 33944 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33945 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33946 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33947 for { 33948 _ = v.Args[1] 33949 or := v.Args[0] 33950 if or.Op != OpAMD64ORQ { 33951 break 33952 } 33953 _ = or.Args[1] 33954 y := or.Args[0] 33955 s1 := or.Args[1] 33956 if s1.Op != OpAMD64SHLQconst { 33957 break 33958 } 33959 j1 := s1.AuxInt 33960 r1 := s1.Args[0] 33961 if r1.Op != OpAMD64ROLWconst { 33962 break 33963 } 33964 if r1.AuxInt != 8 { 33965 break 33966 } 33967 x1 := r1.Args[0] 33968 if x1.Op != OpAMD64MOVWloadidx1 { 33969 break 33970 } 33971 i1 := x1.AuxInt 33972 s := x1.Aux 33973 _ = x1.Args[2] 33974 p := x1.Args[0] 33975 idx := x1.Args[1] 33976 mem := x1.Args[2] 33977 s0 := v.Args[1] 33978 if s0.Op != OpAMD64SHLQconst { 33979 break 33980 } 33981 j0 := s0.AuxInt 33982 r0 := s0.Args[0] 33983 if r0.Op != OpAMD64ROLWconst { 33984 break 33985 } 33986 if r0.AuxInt != 8 { 33987 break 33988 } 33989 x0 := r0.Args[0] 33990 if x0.Op != OpAMD64MOVWloadidx1 { 33991 break 33992 } 33993 i0 := x0.AuxInt 33994 if x0.Aux != s { 33995 break 33996 } 33997 _ = x0.Args[2] 33998 if idx != x0.Args[0] { 33999 break 34000 } 34001 if p != x0.Args[1] { 34002 break 34003 } 34004 if mem != x0.Args[2] { 34005 break 34006 } 34007 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 34008 break 34009 } 34010 b = mergePoint(b, x0, x1) 34011 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34012 v.reset(OpCopy) 34013 v.AddArg(v0) 34014 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34015 v1.AuxInt = j1 34016 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 34017 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34018 v3.AuxInt = i0 34019 v3.Aux = s 34020 v3.AddArg(p) 34021 v3.AddArg(idx) 34022 v3.AddArg(mem) 34023 v2.AddArg(v3) 34024 v1.AddArg(v2) 34025 v0.AddArg(v1) 34026 v0.AddArg(y) 34027 return true 34028 } 34029 return false 34030 } 34031 func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { 34032 b := v.Block 34033 _ = b 34034 typ := &b.Func.Config.Types 34035 _ = typ 34036 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 34037 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 34038 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 34039 
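// Each rule is documented by this three-line header: "match" gives the
// tree pattern with the names it binds, "cond" is a boolean over those
// names that must also hold, and "result" is the replacement; the
// @mergePoint(b,x0,x1) prefix places the new value in the common
// dominating block of the two loads instead of v's own block. This whole
// family of rules exists so that manual big-endian decoding in source
// code, e.g. binary.BigEndian.Uint64(b[i:]) or its open-coded
// shift-and-OR equivalent, compiles down to one wide load plus a
// byte-swap instead of a sequence of byte loads.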
for { 34040 _ = v.Args[1] 34041 or := v.Args[0] 34042 if or.Op != OpAMD64ORQ { 34043 break 34044 } 34045 _ = or.Args[1] 34046 y := or.Args[0] 34047 s1 := or.Args[1] 34048 if s1.Op != OpAMD64SHLQconst { 34049 break 34050 } 34051 j1 := s1.AuxInt 34052 r1 := s1.Args[0] 34053 if r1.Op != OpAMD64ROLWconst { 34054 break 34055 } 34056 if r1.AuxInt != 8 { 34057 break 34058 } 34059 x1 := r1.Args[0] 34060 if x1.Op != OpAMD64MOVWloadidx1 { 34061 break 34062 } 34063 i1 := x1.AuxInt 34064 s := x1.Aux 34065 _ = x1.Args[2] 34066 idx := x1.Args[0] 34067 p := x1.Args[1] 34068 mem := x1.Args[2] 34069 s0 := v.Args[1] 34070 if s0.Op != OpAMD64SHLQconst { 34071 break 34072 } 34073 j0 := s0.AuxInt 34074 r0 := s0.Args[0] 34075 if r0.Op != OpAMD64ROLWconst { 34076 break 34077 } 34078 if r0.AuxInt != 8 { 34079 break 34080 } 34081 x0 := r0.Args[0] 34082 if x0.Op != OpAMD64MOVWloadidx1 { 34083 break 34084 } 34085 i0 := x0.AuxInt 34086 if x0.Aux != s { 34087 break 34088 } 34089 _ = x0.Args[2] 34090 if idx != x0.Args[0] { 34091 break 34092 } 34093 if p != x0.Args[1] { 34094 break 34095 } 34096 if mem != x0.Args[2] { 34097 break 34098 } 34099 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 34100 break 34101 } 34102 b = mergePoint(b, x0, x1) 34103 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34104 v.reset(OpCopy) 34105 v.AddArg(v0) 34106 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34107 v1.AuxInt = j1 34108 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 34109 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34110 v3.AuxInt = i0 34111 v3.Aux = s 34112 v3.AddArg(p) 34113 v3.AddArg(idx) 34114 v3.AddArg(mem) 34115 v2.AddArg(v3) 34116 v1.AddArg(v2) 34117 v0.AddArg(v1) 34118 v0.AddArg(y) 34119 return true 34120 } 34121 // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) 34122 // cond: canMergeLoad(v, l, x) && clobber(l) 34123 // result: (ORQmem x [off] {sym} ptr mem) 34124 for { 34125 _ = v.Args[1] 34126 x := v.Args[0] 34127 l := v.Args[1] 34128 if l.Op != OpAMD64MOVQload { 34129 break 34130 } 34131 off := l.AuxInt 34132 sym := l.Aux 34133 _ = l.Args[1] 34134 ptr := l.Args[0] 34135 mem := l.Args[1] 34136 if !(canMergeLoad(v, l, x) && clobber(l)) { 34137 break 34138 } 34139 v.reset(OpAMD64ORQmem) 34140 v.AuxInt = off 34141 v.Aux = sym 34142 v.AddArg(x) 34143 v.AddArg(ptr) 34144 v.AddArg(mem) 34145 return true 34146 } 34147 // match: (ORQ l:(MOVQload [off] {sym} ptr mem) x) 34148 // cond: canMergeLoad(v, l, x) && clobber(l) 34149 // result: (ORQmem x [off] {sym} ptr mem) 34150 for { 34151 _ = v.Args[1] 34152 l := v.Args[0] 34153 if l.Op != OpAMD64MOVQload { 34154 break 34155 } 34156 off := l.AuxInt 34157 sym := l.Aux 34158 _ = l.Args[1] 34159 ptr := l.Args[0] 34160 mem := l.Args[1] 34161 x := v.Args[1] 34162 if !(canMergeLoad(v, l, x) && clobber(l)) { 34163 break 34164 } 34165 v.reset(OpAMD64ORQmem) 34166 v.AuxInt = off 34167 v.Aux = sym 34168 v.AddArg(x) 34169 v.AddArg(ptr) 34170 v.AddArg(mem) 34171 return true 34172 } 34173 return false 34174 } 34175 func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool { 34176 // match: (ORQconst [0] x) 34177 // cond: 34178 // result: x 34179 for { 34180 if v.AuxInt != 0 { 34181 break 34182 } 34183 x := v.Args[0] 34184 v.reset(OpCopy) 34185 v.Type = x.Type 34186 v.AddArg(x) 34187 return true 34188 } 34189 // match: (ORQconst 
[-1] _) 34190 // cond: 34191 // result: (MOVQconst [-1]) 34192 for { 34193 if v.AuxInt != -1 { 34194 break 34195 } 34196 v.reset(OpAMD64MOVQconst) 34197 v.AuxInt = -1 34198 return true 34199 } 34200 // match: (ORQconst [c] (MOVQconst [d])) 34201 // cond: 34202 // result: (MOVQconst [c|d]) 34203 for { 34204 c := v.AuxInt 34205 v_0 := v.Args[0] 34206 if v_0.Op != OpAMD64MOVQconst { 34207 break 34208 } 34209 d := v_0.AuxInt 34210 v.reset(OpAMD64MOVQconst) 34211 v.AuxInt = c | d 34212 return true 34213 } 34214 return false 34215 } 34216 func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool { 34217 b := v.Block 34218 _ = b 34219 typ := &b.Func.Config.Types 34220 _ = typ 34221 // match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 34222 // cond: 34223 // result: ( ORQ x (MOVQf2i y)) 34224 for { 34225 off := v.AuxInt 34226 sym := v.Aux 34227 _ = v.Args[2] 34228 x := v.Args[0] 34229 ptr := v.Args[1] 34230 v_2 := v.Args[2] 34231 if v_2.Op != OpAMD64MOVSDstore { 34232 break 34233 } 34234 if v_2.AuxInt != off { 34235 break 34236 } 34237 if v_2.Aux != sym { 34238 break 34239 } 34240 _ = v_2.Args[2] 34241 if ptr != v_2.Args[0] { 34242 break 34243 } 34244 y := v_2.Args[1] 34245 v.reset(OpAMD64ORQ) 34246 v.AddArg(x) 34247 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 34248 v0.AddArg(y) 34249 v.AddArg(v0) 34250 return true 34251 } 34252 return false 34253 } 34254 func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool { 34255 // match: (ROLB x (NEGQ y)) 34256 // cond: 34257 // result: (RORB x y) 34258 for { 34259 _ = v.Args[1] 34260 x := v.Args[0] 34261 v_1 := v.Args[1] 34262 if v_1.Op != OpAMD64NEGQ { 34263 break 34264 } 34265 y := v_1.Args[0] 34266 v.reset(OpAMD64RORB) 34267 v.AddArg(x) 34268 v.AddArg(y) 34269 return true 34270 } 34271 // match: (ROLB x (NEGL y)) 34272 // cond: 34273 // result: (RORB x y) 34274 for { 34275 _ = v.Args[1] 34276 x := v.Args[0] 34277 v_1 := v.Args[1] 34278 if v_1.Op != OpAMD64NEGL { 34279 break 34280 } 34281 y := v_1.Args[0] 34282 v.reset(OpAMD64RORB) 34283 v.AddArg(x) 34284 v.AddArg(y) 34285 return true 34286 } 34287 // match: (ROLB x (MOVQconst [c])) 34288 // cond: 34289 // result: (ROLBconst [c&7 ] x) 34290 for { 34291 _ = v.Args[1] 34292 x := v.Args[0] 34293 v_1 := v.Args[1] 34294 if v_1.Op != OpAMD64MOVQconst { 34295 break 34296 } 34297 c := v_1.AuxInt 34298 v.reset(OpAMD64ROLBconst) 34299 v.AuxInt = c & 7 34300 v.AddArg(x) 34301 return true 34302 } 34303 // match: (ROLB x (MOVLconst [c])) 34304 // cond: 34305 // result: (ROLBconst [c&7 ] x) 34306 for { 34307 _ = v.Args[1] 34308 x := v.Args[0] 34309 v_1 := v.Args[1] 34310 if v_1.Op != OpAMD64MOVLconst { 34311 break 34312 } 34313 c := v_1.AuxInt 34314 v.reset(OpAMD64ROLBconst) 34315 v.AuxInt = c & 7 34316 v.AddArg(x) 34317 return true 34318 } 34319 return false 34320 } 34321 func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool { 34322 // match: (ROLBconst [c] (ROLBconst [d] x)) 34323 // cond: 34324 // result: (ROLBconst [(c+d)& 7] x) 34325 for { 34326 c := v.AuxInt 34327 v_0 := v.Args[0] 34328 if v_0.Op != OpAMD64ROLBconst { 34329 break 34330 } 34331 d := v_0.AuxInt 34332 x := v_0.Args[0] 34333 v.reset(OpAMD64ROLBconst) 34334 v.AuxInt = (c + d) & 7 34335 v.AddArg(x) 34336 return true 34337 } 34338 // match: (ROLBconst x [0]) 34339 // cond: 34340 // result: x 34341 for { 34342 if v.AuxInt != 0 { 34343 break 34344 } 34345 x := v.Args[0] 34346 v.reset(OpCopy) 34347 v.Type = x.Type 34348 v.AddArg(x) 34349 return true 34350 } 34351 return false 34352 } 34353 func rewriteValueAMD64_OpAMD64ROLL_0(v 
*Value) bool { 34354 // match: (ROLL x (NEGQ y)) 34355 // cond: 34356 // result: (RORL x y) 34357 for { 34358 _ = v.Args[1] 34359 x := v.Args[0] 34360 v_1 := v.Args[1] 34361 if v_1.Op != OpAMD64NEGQ { 34362 break 34363 } 34364 y := v_1.Args[0] 34365 v.reset(OpAMD64RORL) 34366 v.AddArg(x) 34367 v.AddArg(y) 34368 return true 34369 } 34370 // match: (ROLL x (NEGL y)) 34371 // cond: 34372 // result: (RORL x y) 34373 for { 34374 _ = v.Args[1] 34375 x := v.Args[0] 34376 v_1 := v.Args[1] 34377 if v_1.Op != OpAMD64NEGL { 34378 break 34379 } 34380 y := v_1.Args[0] 34381 v.reset(OpAMD64RORL) 34382 v.AddArg(x) 34383 v.AddArg(y) 34384 return true 34385 } 34386 // match: (ROLL x (MOVQconst [c])) 34387 // cond: 34388 // result: (ROLLconst [c&31] x) 34389 for { 34390 _ = v.Args[1] 34391 x := v.Args[0] 34392 v_1 := v.Args[1] 34393 if v_1.Op != OpAMD64MOVQconst { 34394 break 34395 } 34396 c := v_1.AuxInt 34397 v.reset(OpAMD64ROLLconst) 34398 v.AuxInt = c & 31 34399 v.AddArg(x) 34400 return true 34401 } 34402 // match: (ROLL x (MOVLconst [c])) 34403 // cond: 34404 // result: (ROLLconst [c&31] x) 34405 for { 34406 _ = v.Args[1] 34407 x := v.Args[0] 34408 v_1 := v.Args[1] 34409 if v_1.Op != OpAMD64MOVLconst { 34410 break 34411 } 34412 c := v_1.AuxInt 34413 v.reset(OpAMD64ROLLconst) 34414 v.AuxInt = c & 31 34415 v.AddArg(x) 34416 return true 34417 } 34418 return false 34419 } 34420 func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool { 34421 // match: (ROLLconst [c] (ROLLconst [d] x)) 34422 // cond: 34423 // result: (ROLLconst [(c+d)&31] x) 34424 for { 34425 c := v.AuxInt 34426 v_0 := v.Args[0] 34427 if v_0.Op != OpAMD64ROLLconst { 34428 break 34429 } 34430 d := v_0.AuxInt 34431 x := v_0.Args[0] 34432 v.reset(OpAMD64ROLLconst) 34433 v.AuxInt = (c + d) & 31 34434 v.AddArg(x) 34435 return true 34436 } 34437 // match: (ROLLconst x [0]) 34438 // cond: 34439 // result: x 34440 for { 34441 if v.AuxInt != 0 { 34442 break 34443 } 34444 x := v.Args[0] 34445 v.reset(OpCopy) 34446 v.Type = x.Type 34447 v.AddArg(x) 34448 return true 34449 } 34450 return false 34451 } 34452 func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool { 34453 // match: (ROLQ x (NEGQ y)) 34454 // cond: 34455 // result: (RORQ x y) 34456 for { 34457 _ = v.Args[1] 34458 x := v.Args[0] 34459 v_1 := v.Args[1] 34460 if v_1.Op != OpAMD64NEGQ { 34461 break 34462 } 34463 y := v_1.Args[0] 34464 v.reset(OpAMD64RORQ) 34465 v.AddArg(x) 34466 v.AddArg(y) 34467 return true 34468 } 34469 // match: (ROLQ x (NEGL y)) 34470 // cond: 34471 // result: (RORQ x y) 34472 for { 34473 _ = v.Args[1] 34474 x := v.Args[0] 34475 v_1 := v.Args[1] 34476 if v_1.Op != OpAMD64NEGL { 34477 break 34478 } 34479 y := v_1.Args[0] 34480 v.reset(OpAMD64RORQ) 34481 v.AddArg(x) 34482 v.AddArg(y) 34483 return true 34484 } 34485 // match: (ROLQ x (MOVQconst [c])) 34486 // cond: 34487 // result: (ROLQconst [c&63] x) 34488 for { 34489 _ = v.Args[1] 34490 x := v.Args[0] 34491 v_1 := v.Args[1] 34492 if v_1.Op != OpAMD64MOVQconst { 34493 break 34494 } 34495 c := v_1.AuxInt 34496 v.reset(OpAMD64ROLQconst) 34497 v.AuxInt = c & 63 34498 v.AddArg(x) 34499 return true 34500 } 34501 // match: (ROLQ x (MOVLconst [c])) 34502 // cond: 34503 // result: (ROLQconst [c&63] x) 34504 for { 34505 _ = v.Args[1] 34506 x := v.Args[0] 34507 v_1 := v.Args[1] 34508 if v_1.Op != OpAMD64MOVLconst { 34509 break 34510 } 34511 c := v_1.AuxInt 34512 v.reset(OpAMD64ROLQconst) 34513 v.AuxInt = c & 63 34514 v.AddArg(x) 34515 return true 34516 } 34517 return false 34518 } 34519 func rewriteValueAMD64_OpAMD64ROLQconst_0(v 
*Value) bool { 34520 // match: (ROLQconst [c] (ROLQconst [d] x)) 34521 // cond: 34522 // result: (ROLQconst [(c+d)&63] x) 34523 for { 34524 c := v.AuxInt 34525 v_0 := v.Args[0] 34526 if v_0.Op != OpAMD64ROLQconst { 34527 break 34528 } 34529 d := v_0.AuxInt 34530 x := v_0.Args[0] 34531 v.reset(OpAMD64ROLQconst) 34532 v.AuxInt = (c + d) & 63 34533 v.AddArg(x) 34534 return true 34535 } 34536 // match: (ROLQconst x [0]) 34537 // cond: 34538 // result: x 34539 for { 34540 if v.AuxInt != 0 { 34541 break 34542 } 34543 x := v.Args[0] 34544 v.reset(OpCopy) 34545 v.Type = x.Type 34546 v.AddArg(x) 34547 return true 34548 } 34549 return false 34550 } 34551 func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool { 34552 // match: (ROLW x (NEGQ y)) 34553 // cond: 34554 // result: (RORW x y) 34555 for { 34556 _ = v.Args[1] 34557 x := v.Args[0] 34558 v_1 := v.Args[1] 34559 if v_1.Op != OpAMD64NEGQ { 34560 break 34561 } 34562 y := v_1.Args[0] 34563 v.reset(OpAMD64RORW) 34564 v.AddArg(x) 34565 v.AddArg(y) 34566 return true 34567 } 34568 // match: (ROLW x (NEGL y)) 34569 // cond: 34570 // result: (RORW x y) 34571 for { 34572 _ = v.Args[1] 34573 x := v.Args[0] 34574 v_1 := v.Args[1] 34575 if v_1.Op != OpAMD64NEGL { 34576 break 34577 } 34578 y := v_1.Args[0] 34579 v.reset(OpAMD64RORW) 34580 v.AddArg(x) 34581 v.AddArg(y) 34582 return true 34583 } 34584 // match: (ROLW x (MOVQconst [c])) 34585 // cond: 34586 // result: (ROLWconst [c&15] x) 34587 for { 34588 _ = v.Args[1] 34589 x := v.Args[0] 34590 v_1 := v.Args[1] 34591 if v_1.Op != OpAMD64MOVQconst { 34592 break 34593 } 34594 c := v_1.AuxInt 34595 v.reset(OpAMD64ROLWconst) 34596 v.AuxInt = c & 15 34597 v.AddArg(x) 34598 return true 34599 } 34600 // match: (ROLW x (MOVLconst [c])) 34601 // cond: 34602 // result: (ROLWconst [c&15] x) 34603 for { 34604 _ = v.Args[1] 34605 x := v.Args[0] 34606 v_1 := v.Args[1] 34607 if v_1.Op != OpAMD64MOVLconst { 34608 break 34609 } 34610 c := v_1.AuxInt 34611 v.reset(OpAMD64ROLWconst) 34612 v.AuxInt = c & 15 34613 v.AddArg(x) 34614 return true 34615 } 34616 return false 34617 } 34618 func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool { 34619 // match: (ROLWconst [c] (ROLWconst [d] x)) 34620 // cond: 34621 // result: (ROLWconst [(c+d)&15] x) 34622 for { 34623 c := v.AuxInt 34624 v_0 := v.Args[0] 34625 if v_0.Op != OpAMD64ROLWconst { 34626 break 34627 } 34628 d := v_0.AuxInt 34629 x := v_0.Args[0] 34630 v.reset(OpAMD64ROLWconst) 34631 v.AuxInt = (c + d) & 15 34632 v.AddArg(x) 34633 return true 34634 } 34635 // match: (ROLWconst x [0]) 34636 // cond: 34637 // result: x 34638 for { 34639 if v.AuxInt != 0 { 34640 break 34641 } 34642 x := v.Args[0] 34643 v.reset(OpCopy) 34644 v.Type = x.Type 34645 v.AddArg(x) 34646 return true 34647 } 34648 return false 34649 } 34650 func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool { 34651 // match: (RORB x (NEGQ y)) 34652 // cond: 34653 // result: (ROLB x y) 34654 for { 34655 _ = v.Args[1] 34656 x := v.Args[0] 34657 v_1 := v.Args[1] 34658 if v_1.Op != OpAMD64NEGQ { 34659 break 34660 } 34661 y := v_1.Args[0] 34662 v.reset(OpAMD64ROLB) 34663 v.AddArg(x) 34664 v.AddArg(y) 34665 return true 34666 } 34667 // match: (RORB x (NEGL y)) 34668 // cond: 34669 // result: (ROLB x y) 34670 for { 34671 _ = v.Args[1] 34672 x := v.Args[0] 34673 v_1 := v.Args[1] 34674 if v_1.Op != OpAMD64NEGL { 34675 break 34676 } 34677 y := v_1.Args[0] 34678 v.reset(OpAMD64ROLB) 34679 v.AddArg(x) 34680 v.AddArg(y) 34681 return true 34682 } 34683 // match: (RORB x (MOVQconst [c])) 34684 // cond: 34685 // result: (ROLBconst [(-c)&7 
] x) 34686 for { 34687 _ = v.Args[1] 34688 x := v.Args[0] 34689 v_1 := v.Args[1] 34690 if v_1.Op != OpAMD64MOVQconst { 34691 break 34692 } 34693 c := v_1.AuxInt 34694 v.reset(OpAMD64ROLBconst) 34695 v.AuxInt = (-c) & 7 34696 v.AddArg(x) 34697 return true 34698 } 34699 // match: (RORB x (MOVLconst [c])) 34700 // cond: 34701 // result: (ROLBconst [(-c)&7 ] x) 34702 for { 34703 _ = v.Args[1] 34704 x := v.Args[0] 34705 v_1 := v.Args[1] 34706 if v_1.Op != OpAMD64MOVLconst { 34707 break 34708 } 34709 c := v_1.AuxInt 34710 v.reset(OpAMD64ROLBconst) 34711 v.AuxInt = (-c) & 7 34712 v.AddArg(x) 34713 return true 34714 } 34715 return false 34716 } 34717 func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool { 34718 // match: (RORL x (NEGQ y)) 34719 // cond: 34720 // result: (ROLL x y) 34721 for { 34722 _ = v.Args[1] 34723 x := v.Args[0] 34724 v_1 := v.Args[1] 34725 if v_1.Op != OpAMD64NEGQ { 34726 break 34727 } 34728 y := v_1.Args[0] 34729 v.reset(OpAMD64ROLL) 34730 v.AddArg(x) 34731 v.AddArg(y) 34732 return true 34733 } 34734 // match: (RORL x (NEGL y)) 34735 // cond: 34736 // result: (ROLL x y) 34737 for { 34738 _ = v.Args[1] 34739 x := v.Args[0] 34740 v_1 := v.Args[1] 34741 if v_1.Op != OpAMD64NEGL { 34742 break 34743 } 34744 y := v_1.Args[0] 34745 v.reset(OpAMD64ROLL) 34746 v.AddArg(x) 34747 v.AddArg(y) 34748 return true 34749 } 34750 // match: (RORL x (MOVQconst [c])) 34751 // cond: 34752 // result: (ROLLconst [(-c)&31] x) 34753 for { 34754 _ = v.Args[1] 34755 x := v.Args[0] 34756 v_1 := v.Args[1] 34757 if v_1.Op != OpAMD64MOVQconst { 34758 break 34759 } 34760 c := v_1.AuxInt 34761 v.reset(OpAMD64ROLLconst) 34762 v.AuxInt = (-c) & 31 34763 v.AddArg(x) 34764 return true 34765 } 34766 // match: (RORL x (MOVLconst [c])) 34767 // cond: 34768 // result: (ROLLconst [(-c)&31] x) 34769 for { 34770 _ = v.Args[1] 34771 x := v.Args[0] 34772 v_1 := v.Args[1] 34773 if v_1.Op != OpAMD64MOVLconst { 34774 break 34775 } 34776 c := v_1.AuxInt 34777 v.reset(OpAMD64ROLLconst) 34778 v.AuxInt = (-c) & 31 34779 v.AddArg(x) 34780 return true 34781 } 34782 return false 34783 } 34784 func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool { 34785 // match: (RORQ x (NEGQ y)) 34786 // cond: 34787 // result: (ROLQ x y) 34788 for { 34789 _ = v.Args[1] 34790 x := v.Args[0] 34791 v_1 := v.Args[1] 34792 if v_1.Op != OpAMD64NEGQ { 34793 break 34794 } 34795 y := v_1.Args[0] 34796 v.reset(OpAMD64ROLQ) 34797 v.AddArg(x) 34798 v.AddArg(y) 34799 return true 34800 } 34801 // match: (RORQ x (NEGL y)) 34802 // cond: 34803 // result: (ROLQ x y) 34804 for { 34805 _ = v.Args[1] 34806 x := v.Args[0] 34807 v_1 := v.Args[1] 34808 if v_1.Op != OpAMD64NEGL { 34809 break 34810 } 34811 y := v_1.Args[0] 34812 v.reset(OpAMD64ROLQ) 34813 v.AddArg(x) 34814 v.AddArg(y) 34815 return true 34816 } 34817 // match: (RORQ x (MOVQconst [c])) 34818 // cond: 34819 // result: (ROLQconst [(-c)&63] x) 34820 for { 34821 _ = v.Args[1] 34822 x := v.Args[0] 34823 v_1 := v.Args[1] 34824 if v_1.Op != OpAMD64MOVQconst { 34825 break 34826 } 34827 c := v_1.AuxInt 34828 v.reset(OpAMD64ROLQconst) 34829 v.AuxInt = (-c) & 63 34830 v.AddArg(x) 34831 return true 34832 } 34833 // match: (RORQ x (MOVLconst [c])) 34834 // cond: 34835 // result: (ROLQconst [(-c)&63] x) 34836 for { 34837 _ = v.Args[1] 34838 x := v.Args[0] 34839 v_1 := v.Args[1] 34840 if v_1.Op != OpAMD64MOVLconst { 34841 break 34842 } 34843 c := v_1.AuxInt 34844 v.reset(OpAMD64ROLQconst) 34845 v.AuxInt = (-c) & 63 34846 v.AddArg(x) 34847 return true 34848 } 34849 return false 34850 } 34851 func 
rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool { 34852 // match: (RORW x (NEGQ y)) 34853 // cond: 34854 // result: (ROLW x y) 34855 for { 34856 _ = v.Args[1] 34857 x := v.Args[0] 34858 v_1 := v.Args[1] 34859 if v_1.Op != OpAMD64NEGQ { 34860 break 34861 } 34862 y := v_1.Args[0] 34863 v.reset(OpAMD64ROLW) 34864 v.AddArg(x) 34865 v.AddArg(y) 34866 return true 34867 } 34868 // match: (RORW x (NEGL y)) 34869 // cond: 34870 // result: (ROLW x y) 34871 for { 34872 _ = v.Args[1] 34873 x := v.Args[0] 34874 v_1 := v.Args[1] 34875 if v_1.Op != OpAMD64NEGL { 34876 break 34877 } 34878 y := v_1.Args[0] 34879 v.reset(OpAMD64ROLW) 34880 v.AddArg(x) 34881 v.AddArg(y) 34882 return true 34883 } 34884 // match: (RORW x (MOVQconst [c])) 34885 // cond: 34886 // result: (ROLWconst [(-c)&15] x) 34887 for { 34888 _ = v.Args[1] 34889 x := v.Args[0] 34890 v_1 := v.Args[1] 34891 if v_1.Op != OpAMD64MOVQconst { 34892 break 34893 } 34894 c := v_1.AuxInt 34895 v.reset(OpAMD64ROLWconst) 34896 v.AuxInt = (-c) & 15 34897 v.AddArg(x) 34898 return true 34899 } 34900 // match: (RORW x (MOVLconst [c])) 34901 // cond: 34902 // result: (ROLWconst [(-c)&15] x) 34903 for { 34904 _ = v.Args[1] 34905 x := v.Args[0] 34906 v_1 := v.Args[1] 34907 if v_1.Op != OpAMD64MOVLconst { 34908 break 34909 } 34910 c := v_1.AuxInt 34911 v.reset(OpAMD64ROLWconst) 34912 v.AuxInt = (-c) & 15 34913 v.AddArg(x) 34914 return true 34915 } 34916 return false 34917 } 34918 func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool { 34919 // match: (SARB x (MOVQconst [c])) 34920 // cond: 34921 // result: (SARBconst [min(c&31,7)] x) 34922 for { 34923 _ = v.Args[1] 34924 x := v.Args[0] 34925 v_1 := v.Args[1] 34926 if v_1.Op != OpAMD64MOVQconst { 34927 break 34928 } 34929 c := v_1.AuxInt 34930 v.reset(OpAMD64SARBconst) 34931 v.AuxInt = min(c&31, 7) 34932 v.AddArg(x) 34933 return true 34934 } 34935 // match: (SARB x (MOVLconst [c])) 34936 // cond: 34937 // result: (SARBconst [min(c&31,7)] x) 34938 for { 34939 _ = v.Args[1] 34940 x := v.Args[0] 34941 v_1 := v.Args[1] 34942 if v_1.Op != OpAMD64MOVLconst { 34943 break 34944 } 34945 c := v_1.AuxInt 34946 v.reset(OpAMD64SARBconst) 34947 v.AuxInt = min(c&31, 7) 34948 v.AddArg(x) 34949 return true 34950 } 34951 return false 34952 } 34953 func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool { 34954 // match: (SARBconst x [0]) 34955 // cond: 34956 // result: x 34957 for { 34958 if v.AuxInt != 0 { 34959 break 34960 } 34961 x := v.Args[0] 34962 v.reset(OpCopy) 34963 v.Type = x.Type 34964 v.AddArg(x) 34965 return true 34966 } 34967 // match: (SARBconst [c] (MOVQconst [d])) 34968 // cond: 34969 // result: (MOVQconst [d>>uint64(c)]) 34970 for { 34971 c := v.AuxInt 34972 v_0 := v.Args[0] 34973 if v_0.Op != OpAMD64MOVQconst { 34974 break 34975 } 34976 d := v_0.AuxInt 34977 v.reset(OpAMD64MOVQconst) 34978 v.AuxInt = d >> uint64(c) 34979 return true 34980 } 34981 return false 34982 } 34983 func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool { 34984 b := v.Block 34985 _ = b 34986 // match: (SARL x (MOVQconst [c])) 34987 // cond: 34988 // result: (SARLconst [c&31] x) 34989 for { 34990 _ = v.Args[1] 34991 x := v.Args[0] 34992 v_1 := v.Args[1] 34993 if v_1.Op != OpAMD64MOVQconst { 34994 break 34995 } 34996 c := v_1.AuxInt 34997 v.reset(OpAMD64SARLconst) 34998 v.AuxInt = c & 31 34999 v.AddArg(x) 35000 return true 35001 } 35002 // match: (SARL x (MOVLconst [c])) 35003 // cond: 35004 // result: (SARLconst [c&31] x) 35005 for { 35006 _ = v.Args[1] 35007 x := v.Args[0] 35008 v_1 := v.Args[1] 35009 if v_1.Op != OpAMD64MOVLconst { 35010 
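// No match: break exits the enclosing one-iteration for loop, abandoning
// this rule so control falls through to the next candidate rewrite. Every
// rule body in this file is wrapped in such a loop for exactly this reason.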
break 35011 } 35012 c := v_1.AuxInt 35013 v.reset(OpAMD64SARLconst) 35014 v.AuxInt = c & 31 35015 v.AddArg(x) 35016 return true 35017 } 35018 // match: (SARL x (ADDQconst [c] y)) 35019 // cond: c & 31 == 0 35020 // result: (SARL x y) 35021 for { 35022 _ = v.Args[1] 35023 x := v.Args[0] 35024 v_1 := v.Args[1] 35025 if v_1.Op != OpAMD64ADDQconst { 35026 break 35027 } 35028 c := v_1.AuxInt 35029 y := v_1.Args[0] 35030 if !(c&31 == 0) { 35031 break 35032 } 35033 v.reset(OpAMD64SARL) 35034 v.AddArg(x) 35035 v.AddArg(y) 35036 return true 35037 } 35038 // match: (SARL x (NEGQ <t> (ADDQconst [c] y))) 35039 // cond: c & 31 == 0 35040 // result: (SARL x (NEGQ <t> y)) 35041 for { 35042 _ = v.Args[1] 35043 x := v.Args[0] 35044 v_1 := v.Args[1] 35045 if v_1.Op != OpAMD64NEGQ { 35046 break 35047 } 35048 t := v_1.Type 35049 v_1_0 := v_1.Args[0] 35050 if v_1_0.Op != OpAMD64ADDQconst { 35051 break 35052 } 35053 c := v_1_0.AuxInt 35054 y := v_1_0.Args[0] 35055 if !(c&31 == 0) { 35056 break 35057 } 35058 v.reset(OpAMD64SARL) 35059 v.AddArg(x) 35060 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35061 v0.AddArg(y) 35062 v.AddArg(v0) 35063 return true 35064 } 35065 // match: (SARL x (ANDQconst [c] y)) 35066 // cond: c & 31 == 31 35067 // result: (SARL x y) 35068 for { 35069 _ = v.Args[1] 35070 x := v.Args[0] 35071 v_1 := v.Args[1] 35072 if v_1.Op != OpAMD64ANDQconst { 35073 break 35074 } 35075 c := v_1.AuxInt 35076 y := v_1.Args[0] 35077 if !(c&31 == 31) { 35078 break 35079 } 35080 v.reset(OpAMD64SARL) 35081 v.AddArg(x) 35082 v.AddArg(y) 35083 return true 35084 } 35085 // match: (SARL x (NEGQ <t> (ANDQconst [c] y))) 35086 // cond: c & 31 == 31 35087 // result: (SARL x (NEGQ <t> y)) 35088 for { 35089 _ = v.Args[1] 35090 x := v.Args[0] 35091 v_1 := v.Args[1] 35092 if v_1.Op != OpAMD64NEGQ { 35093 break 35094 } 35095 t := v_1.Type 35096 v_1_0 := v_1.Args[0] 35097 if v_1_0.Op != OpAMD64ANDQconst { 35098 break 35099 } 35100 c := v_1_0.AuxInt 35101 y := v_1_0.Args[0] 35102 if !(c&31 == 31) { 35103 break 35104 } 35105 v.reset(OpAMD64SARL) 35106 v.AddArg(x) 35107 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35108 v0.AddArg(y) 35109 v.AddArg(v0) 35110 return true 35111 } 35112 // match: (SARL x (ADDLconst [c] y)) 35113 // cond: c & 31 == 0 35114 // result: (SARL x y) 35115 for { 35116 _ = v.Args[1] 35117 x := v.Args[0] 35118 v_1 := v.Args[1] 35119 if v_1.Op != OpAMD64ADDLconst { 35120 break 35121 } 35122 c := v_1.AuxInt 35123 y := v_1.Args[0] 35124 if !(c&31 == 0) { 35125 break 35126 } 35127 v.reset(OpAMD64SARL) 35128 v.AddArg(x) 35129 v.AddArg(y) 35130 return true 35131 } 35132 // match: (SARL x (NEGL <t> (ADDLconst [c] y))) 35133 // cond: c & 31 == 0 35134 // result: (SARL x (NEGL <t> y)) 35135 for { 35136 _ = v.Args[1] 35137 x := v.Args[0] 35138 v_1 := v.Args[1] 35139 if v_1.Op != OpAMD64NEGL { 35140 break 35141 } 35142 t := v_1.Type 35143 v_1_0 := v_1.Args[0] 35144 if v_1_0.Op != OpAMD64ADDLconst { 35145 break 35146 } 35147 c := v_1_0.AuxInt 35148 y := v_1_0.Args[0] 35149 if !(c&31 == 0) { 35150 break 35151 } 35152 v.reset(OpAMD64SARL) 35153 v.AddArg(x) 35154 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35155 v0.AddArg(y) 35156 v.AddArg(v0) 35157 return true 35158 } 35159 // match: (SARL x (ANDLconst [c] y)) 35160 // cond: c & 31 == 31 35161 // result: (SARL x y) 35162 for { 35163 _ = v.Args[1] 35164 x := v.Args[0] 35165 v_1 := v.Args[1] 35166 if v_1.Op != OpAMD64ANDLconst { 35167 break 35168 } 35169 c := v_1.AuxInt 35170 y := v_1.Args[0] 35171 if !(c&31 == 31) { 35172 break 35173 } 35174 v.reset(OpAMD64SARL) 35175 v.AddArg(x) 
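// AddArg appends, so x (the shifted value) lands in Args[0] and y (the
// count) in Args[1], matching SARL's operand order.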
35176 v.AddArg(y) 35177 return true 35178 } 35179 // match: (SARL x (NEGL <t> (ANDLconst [c] y))) 35180 // cond: c & 31 == 31 35181 // result: (SARL x (NEGL <t> y)) 35182 for { 35183 _ = v.Args[1] 35184 x := v.Args[0] 35185 v_1 := v.Args[1] 35186 if v_1.Op != OpAMD64NEGL { 35187 break 35188 } 35189 t := v_1.Type 35190 v_1_0 := v_1.Args[0] 35191 if v_1_0.Op != OpAMD64ANDLconst { 35192 break 35193 } 35194 c := v_1_0.AuxInt 35195 y := v_1_0.Args[0] 35196 if !(c&31 == 31) { 35197 break 35198 } 35199 v.reset(OpAMD64SARL) 35200 v.AddArg(x) 35201 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35202 v0.AddArg(y) 35203 v.AddArg(v0) 35204 return true 35205 } 35206 return false 35207 } 35208 func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool { 35209 // match: (SARLconst x [0]) 35210 // cond: 35211 // result: x 35212 for { 35213 if v.AuxInt != 0 { 35214 break 35215 } 35216 x := v.Args[0] 35217 v.reset(OpCopy) 35218 v.Type = x.Type 35219 v.AddArg(x) 35220 return true 35221 } 35222 // match: (SARLconst [c] (MOVQconst [d])) 35223 // cond: 35224 // result: (MOVQconst [d>>uint64(c)]) 35225 for { 35226 c := v.AuxInt 35227 v_0 := v.Args[0] 35228 if v_0.Op != OpAMD64MOVQconst { 35229 break 35230 } 35231 d := v_0.AuxInt 35232 v.reset(OpAMD64MOVQconst) 35233 v.AuxInt = d >> uint64(c) 35234 return true 35235 } 35236 return false 35237 } 35238 func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool { 35239 b := v.Block 35240 _ = b 35241 // match: (SARQ x (MOVQconst [c])) 35242 // cond: 35243 // result: (SARQconst [c&63] x) 35244 for { 35245 _ = v.Args[1] 35246 x := v.Args[0] 35247 v_1 := v.Args[1] 35248 if v_1.Op != OpAMD64MOVQconst { 35249 break 35250 } 35251 c := v_1.AuxInt 35252 v.reset(OpAMD64SARQconst) 35253 v.AuxInt = c & 63 35254 v.AddArg(x) 35255 return true 35256 } 35257 // match: (SARQ x (MOVLconst [c])) 35258 // cond: 35259 // result: (SARQconst [c&63] x) 35260 for { 35261 _ = v.Args[1] 35262 x := v.Args[0] 35263 v_1 := v.Args[1] 35264 if v_1.Op != OpAMD64MOVLconst { 35265 break 35266 } 35267 c := v_1.AuxInt 35268 v.reset(OpAMD64SARQconst) 35269 v.AuxInt = c & 63 35270 v.AddArg(x) 35271 return true 35272 } 35273 // match: (SARQ x (ADDQconst [c] y)) 35274 // cond: c & 63 == 0 35275 // result: (SARQ x y) 35276 for { 35277 _ = v.Args[1] 35278 x := v.Args[0] 35279 v_1 := v.Args[1] 35280 if v_1.Op != OpAMD64ADDQconst { 35281 break 35282 } 35283 c := v_1.AuxInt 35284 y := v_1.Args[0] 35285 if !(c&63 == 0) { 35286 break 35287 } 35288 v.reset(OpAMD64SARQ) 35289 v.AddArg(x) 35290 v.AddArg(y) 35291 return true 35292 } 35293 // match: (SARQ x (NEGQ <t> (ADDQconst [c] y))) 35294 // cond: c & 63 == 0 35295 // result: (SARQ x (NEGQ <t> y)) 35296 for { 35297 _ = v.Args[1] 35298 x := v.Args[0] 35299 v_1 := v.Args[1] 35300 if v_1.Op != OpAMD64NEGQ { 35301 break 35302 } 35303 t := v_1.Type 35304 v_1_0 := v_1.Args[0] 35305 if v_1_0.Op != OpAMD64ADDQconst { 35306 break 35307 } 35308 c := v_1_0.AuxInt 35309 y := v_1_0.Args[0] 35310 if !(c&63 == 0) { 35311 break 35312 } 35313 v.reset(OpAMD64SARQ) 35314 v.AddArg(x) 35315 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35316 v0.AddArg(y) 35317 v.AddArg(v0) 35318 return true 35319 } 35320 // match: (SARQ x (ANDQconst [c] y)) 35321 // cond: c & 63 == 63 35322 // result: (SARQ x y) 35323 for { 35324 _ = v.Args[1] 35325 x := v.Args[0] 35326 v_1 := v.Args[1] 35327 if v_1.Op != OpAMD64ANDQconst { 35328 break 35329 } 35330 c := v_1.AuxInt 35331 y := v_1.Args[0] 35332 if !(c&63 == 63) { 35333 break 35334 } 35335 v.reset(OpAMD64SARQ) 35336 v.AddArg(x) 35337 v.AddArg(y) 35338 return true 35339 } 
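// The count-masking rules above and below rely on x86-64 shift semantics:
// a 64-bit SARQ uses only the low 6 bits of its count (count mod 64). So
// ANDing the count with a constant whose low 6 bits are all ones
// (c&63 == 63) is a no-op — a count of 70 shifts by 70&63 = 6 either
// way — and adding a multiple of 64 (c&63 == 0) likewise leaves the
// effective count unchanged.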
35340 // match: (SARQ x (NEGQ <t> (ANDQconst [c] y))) 35341 // cond: c & 63 == 63 35342 // result: (SARQ x (NEGQ <t> y)) 35343 for { 35344 _ = v.Args[1] 35345 x := v.Args[0] 35346 v_1 := v.Args[1] 35347 if v_1.Op != OpAMD64NEGQ { 35348 break 35349 } 35350 t := v_1.Type 35351 v_1_0 := v_1.Args[0] 35352 if v_1_0.Op != OpAMD64ANDQconst { 35353 break 35354 } 35355 c := v_1_0.AuxInt 35356 y := v_1_0.Args[0] 35357 if !(c&63 == 63) { 35358 break 35359 } 35360 v.reset(OpAMD64SARQ) 35361 v.AddArg(x) 35362 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 35363 v0.AddArg(y) 35364 v.AddArg(v0) 35365 return true 35366 } 35367 // match: (SARQ x (ADDLconst [c] y)) 35368 // cond: c & 63 == 0 35369 // result: (SARQ x y) 35370 for { 35371 _ = v.Args[1] 35372 x := v.Args[0] 35373 v_1 := v.Args[1] 35374 if v_1.Op != OpAMD64ADDLconst { 35375 break 35376 } 35377 c := v_1.AuxInt 35378 y := v_1.Args[0] 35379 if !(c&63 == 0) { 35380 break 35381 } 35382 v.reset(OpAMD64SARQ) 35383 v.AddArg(x) 35384 v.AddArg(y) 35385 return true 35386 } 35387 // match: (SARQ x (NEGL <t> (ADDLconst [c] y))) 35388 // cond: c & 63 == 0 35389 // result: (SARQ x (NEGL <t> y)) 35390 for { 35391 _ = v.Args[1] 35392 x := v.Args[0] 35393 v_1 := v.Args[1] 35394 if v_1.Op != OpAMD64NEGL { 35395 break 35396 } 35397 t := v_1.Type 35398 v_1_0 := v_1.Args[0] 35399 if v_1_0.Op != OpAMD64ADDLconst { 35400 break 35401 } 35402 c := v_1_0.AuxInt 35403 y := v_1_0.Args[0] 35404 if !(c&63 == 0) { 35405 break 35406 } 35407 v.reset(OpAMD64SARQ) 35408 v.AddArg(x) 35409 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35410 v0.AddArg(y) 35411 v.AddArg(v0) 35412 return true 35413 } 35414 // match: (SARQ x (ANDLconst [c] y)) 35415 // cond: c & 63 == 63 35416 // result: (SARQ x y) 35417 for { 35418 _ = v.Args[1] 35419 x := v.Args[0] 35420 v_1 := v.Args[1] 35421 if v_1.Op != OpAMD64ANDLconst { 35422 break 35423 } 35424 c := v_1.AuxInt 35425 y := v_1.Args[0] 35426 if !(c&63 == 63) { 35427 break 35428 } 35429 v.reset(OpAMD64SARQ) 35430 v.AddArg(x) 35431 v.AddArg(y) 35432 return true 35433 } 35434 // match: (SARQ x (NEGL <t> (ANDLconst [c] y))) 35435 // cond: c & 63 == 63 35436 // result: (SARQ x (NEGL <t> y)) 35437 for { 35438 _ = v.Args[1] 35439 x := v.Args[0] 35440 v_1 := v.Args[1] 35441 if v_1.Op != OpAMD64NEGL { 35442 break 35443 } 35444 t := v_1.Type 35445 v_1_0 := v_1.Args[0] 35446 if v_1_0.Op != OpAMD64ANDLconst { 35447 break 35448 } 35449 c := v_1_0.AuxInt 35450 y := v_1_0.Args[0] 35451 if !(c&63 == 63) { 35452 break 35453 } 35454 v.reset(OpAMD64SARQ) 35455 v.AddArg(x) 35456 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 35457 v0.AddArg(y) 35458 v.AddArg(v0) 35459 return true 35460 } 35461 return false 35462 } 35463 func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool { 35464 // match: (SARQconst x [0]) 35465 // cond: 35466 // result: x 35467 for { 35468 if v.AuxInt != 0 { 35469 break 35470 } 35471 x := v.Args[0] 35472 v.reset(OpCopy) 35473 v.Type = x.Type 35474 v.AddArg(x) 35475 return true 35476 } 35477 // match: (SARQconst [c] (MOVQconst [d])) 35478 // cond: 35479 // result: (MOVQconst [d>>uint64(c)]) 35480 for { 35481 c := v.AuxInt 35482 v_0 := v.Args[0] 35483 if v_0.Op != OpAMD64MOVQconst { 35484 break 35485 } 35486 d := v_0.AuxInt 35487 v.reset(OpAMD64MOVQconst) 35488 v.AuxInt = d >> uint64(c) 35489 return true 35490 } 35491 return false 35492 } 35493 func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool { 35494 // match: (SARW x (MOVQconst [c])) 35495 // cond: 35496 // result: (SARWconst [min(c&31,15)] x) 35497 for { 35498 _ = v.Args[1] 35499 x := v.Args[0] 35500 v_1 := 
v.Args[1] 35501 if v_1.Op != OpAMD64MOVQconst { 35502 break 35503 } 35504 c := v_1.AuxInt 35505 v.reset(OpAMD64SARWconst) 35506 v.AuxInt = min(c&31, 15) 35507 v.AddArg(x) 35508 return true 35509 } 35510 // match: (SARW x (MOVLconst [c])) 35511 // cond: 35512 // result: (SARWconst [min(c&31,15)] x) 35513 for { 35514 _ = v.Args[1] 35515 x := v.Args[0] 35516 v_1 := v.Args[1] 35517 if v_1.Op != OpAMD64MOVLconst { 35518 break 35519 } 35520 c := v_1.AuxInt 35521 v.reset(OpAMD64SARWconst) 35522 v.AuxInt = min(c&31, 15) 35523 v.AddArg(x) 35524 return true 35525 } 35526 return false 35527 } 35528 func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool { 35529 // match: (SARWconst x [0]) 35530 // cond: 35531 // result: x 35532 for { 35533 if v.AuxInt != 0 { 35534 break 35535 } 35536 x := v.Args[0] 35537 v.reset(OpCopy) 35538 v.Type = x.Type 35539 v.AddArg(x) 35540 return true 35541 } 35542 // match: (SARWconst [c] (MOVQconst [d])) 35543 // cond: 35544 // result: (MOVQconst [d>>uint64(c)]) 35545 for { 35546 c := v.AuxInt 35547 v_0 := v.Args[0] 35548 if v_0.Op != OpAMD64MOVQconst { 35549 break 35550 } 35551 d := v_0.AuxInt 35552 v.reset(OpAMD64MOVQconst) 35553 v.AuxInt = d >> uint64(c) 35554 return true 35555 } 35556 return false 35557 } 35558 func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { 35559 // match: (SBBLcarrymask (FlagEQ)) 35560 // cond: 35561 // result: (MOVLconst [0]) 35562 for { 35563 v_0 := v.Args[0] 35564 if v_0.Op != OpAMD64FlagEQ { 35565 break 35566 } 35567 v.reset(OpAMD64MOVLconst) 35568 v.AuxInt = 0 35569 return true 35570 } 35571 // match: (SBBLcarrymask (FlagLT_ULT)) 35572 // cond: 35573 // result: (MOVLconst [-1]) 35574 for { 35575 v_0 := v.Args[0] 35576 if v_0.Op != OpAMD64FlagLT_ULT { 35577 break 35578 } 35579 v.reset(OpAMD64MOVLconst) 35580 v.AuxInt = -1 35581 return true 35582 } 35583 // match: (SBBLcarrymask (FlagLT_UGT)) 35584 // cond: 35585 // result: (MOVLconst [0]) 35586 for { 35587 v_0 := v.Args[0] 35588 if v_0.Op != OpAMD64FlagLT_UGT { 35589 break 35590 } 35591 v.reset(OpAMD64MOVLconst) 35592 v.AuxInt = 0 35593 return true 35594 } 35595 // match: (SBBLcarrymask (FlagGT_ULT)) 35596 // cond: 35597 // result: (MOVLconst [-1]) 35598 for { 35599 v_0 := v.Args[0] 35600 if v_0.Op != OpAMD64FlagGT_ULT { 35601 break 35602 } 35603 v.reset(OpAMD64MOVLconst) 35604 v.AuxInt = -1 35605 return true 35606 } 35607 // match: (SBBLcarrymask (FlagGT_UGT)) 35608 // cond: 35609 // result: (MOVLconst [0]) 35610 for { 35611 v_0 := v.Args[0] 35612 if v_0.Op != OpAMD64FlagGT_UGT { 35613 break 35614 } 35615 v.reset(OpAMD64MOVLconst) 35616 v.AuxInt = 0 35617 return true 35618 } 35619 return false 35620 } 35621 func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { 35622 // match: (SBBQcarrymask (FlagEQ)) 35623 // cond: 35624 // result: (MOVQconst [0]) 35625 for { 35626 v_0 := v.Args[0] 35627 if v_0.Op != OpAMD64FlagEQ { 35628 break 35629 } 35630 v.reset(OpAMD64MOVQconst) 35631 v.AuxInt = 0 35632 return true 35633 } 35634 // match: (SBBQcarrymask (FlagLT_ULT)) 35635 // cond: 35636 // result: (MOVQconst [-1]) 35637 for { 35638 v_0 := v.Args[0] 35639 if v_0.Op != OpAMD64FlagLT_ULT { 35640 break 35641 } 35642 v.reset(OpAMD64MOVQconst) 35643 v.AuxInt = -1 35644 return true 35645 } 35646 // match: (SBBQcarrymask (FlagLT_UGT)) 35647 // cond: 35648 // result: (MOVQconst [0]) 35649 for { 35650 v_0 := v.Args[0] 35651 if v_0.Op != OpAMD64FlagLT_UGT { 35652 break 35653 } 35654 v.reset(OpAMD64MOVQconst) 35655 v.AuxInt = 0 35656 return true 35657 } 35658 // match: (SBBQcarrymask 
(FlagGT_ULT)) 35659 // cond: 35660 // result: (MOVQconst [-1]) 35661 for { 35662 v_0 := v.Args[0] 35663 if v_0.Op != OpAMD64FlagGT_ULT { 35664 break 35665 } 35666 v.reset(OpAMD64MOVQconst) 35667 v.AuxInt = -1 35668 return true 35669 } 35670 // match: (SBBQcarrymask (FlagGT_UGT)) 35671 // cond: 35672 // result: (MOVQconst [0]) 35673 for { 35674 v_0 := v.Args[0] 35675 if v_0.Op != OpAMD64FlagGT_UGT { 35676 break 35677 } 35678 v.reset(OpAMD64MOVQconst) 35679 v.AuxInt = 0 35680 return true 35681 } 35682 return false 35683 } 35684 func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { 35685 // match: (SETA (InvertFlags x)) 35686 // cond: 35687 // result: (SETB x) 35688 for { 35689 v_0 := v.Args[0] 35690 if v_0.Op != OpAMD64InvertFlags { 35691 break 35692 } 35693 x := v_0.Args[0] 35694 v.reset(OpAMD64SETB) 35695 v.AddArg(x) 35696 return true 35697 } 35698 // match: (SETA (FlagEQ)) 35699 // cond: 35700 // result: (MOVLconst [0]) 35701 for { 35702 v_0 := v.Args[0] 35703 if v_0.Op != OpAMD64FlagEQ { 35704 break 35705 } 35706 v.reset(OpAMD64MOVLconst) 35707 v.AuxInt = 0 35708 return true 35709 } 35710 // match: (SETA (FlagLT_ULT)) 35711 // cond: 35712 // result: (MOVLconst [0]) 35713 for { 35714 v_0 := v.Args[0] 35715 if v_0.Op != OpAMD64FlagLT_ULT { 35716 break 35717 } 35718 v.reset(OpAMD64MOVLconst) 35719 v.AuxInt = 0 35720 return true 35721 } 35722 // match: (SETA (FlagLT_UGT)) 35723 // cond: 35724 // result: (MOVLconst [1]) 35725 for { 35726 v_0 := v.Args[0] 35727 if v_0.Op != OpAMD64FlagLT_UGT { 35728 break 35729 } 35730 v.reset(OpAMD64MOVLconst) 35731 v.AuxInt = 1 35732 return true 35733 } 35734 // match: (SETA (FlagGT_ULT)) 35735 // cond: 35736 // result: (MOVLconst [0]) 35737 for { 35738 v_0 := v.Args[0] 35739 if v_0.Op != OpAMD64FlagGT_ULT { 35740 break 35741 } 35742 v.reset(OpAMD64MOVLconst) 35743 v.AuxInt = 0 35744 return true 35745 } 35746 // match: (SETA (FlagGT_UGT)) 35747 // cond: 35748 // result: (MOVLconst [1]) 35749 for { 35750 v_0 := v.Args[0] 35751 if v_0.Op != OpAMD64FlagGT_UGT { 35752 break 35753 } 35754 v.reset(OpAMD64MOVLconst) 35755 v.AuxInt = 1 35756 return true 35757 } 35758 return false 35759 } 35760 func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { 35761 // match: (SETAE (InvertFlags x)) 35762 // cond: 35763 // result: (SETBE x) 35764 for { 35765 v_0 := v.Args[0] 35766 if v_0.Op != OpAMD64InvertFlags { 35767 break 35768 } 35769 x := v_0.Args[0] 35770 v.reset(OpAMD64SETBE) 35771 v.AddArg(x) 35772 return true 35773 } 35774 // match: (SETAE (FlagEQ)) 35775 // cond: 35776 // result: (MOVLconst [1]) 35777 for { 35778 v_0 := v.Args[0] 35779 if v_0.Op != OpAMD64FlagEQ { 35780 break 35781 } 35782 v.reset(OpAMD64MOVLconst) 35783 v.AuxInt = 1 35784 return true 35785 } 35786 // match: (SETAE (FlagLT_ULT)) 35787 // cond: 35788 // result: (MOVLconst [0]) 35789 for { 35790 v_0 := v.Args[0] 35791 if v_0.Op != OpAMD64FlagLT_ULT { 35792 break 35793 } 35794 v.reset(OpAMD64MOVLconst) 35795 v.AuxInt = 0 35796 return true 35797 } 35798 // match: (SETAE (FlagLT_UGT)) 35799 // cond: 35800 // result: (MOVLconst [1]) 35801 for { 35802 v_0 := v.Args[0] 35803 if v_0.Op != OpAMD64FlagLT_UGT { 35804 break 35805 } 35806 v.reset(OpAMD64MOVLconst) 35807 v.AuxInt = 1 35808 return true 35809 } 35810 // match: (SETAE (FlagGT_ULT)) 35811 // cond: 35812 // result: (MOVLconst [0]) 35813 for { 35814 v_0 := v.Args[0] 35815 if v_0.Op != OpAMD64FlagGT_ULT { 35816 break 35817 } 35818 v.reset(OpAMD64MOVLconst) 35819 v.AuxInt = 0 35820 return true 35821 } 35822 // match: (SETAE (FlagGT_UGT)) 35823 // cond: 
35824 // result: (MOVLconst [1]) 35825 for { 35826 v_0 := v.Args[0] 35827 if v_0.Op != OpAMD64FlagGT_UGT { 35828 break 35829 } 35830 v.reset(OpAMD64MOVLconst) 35831 v.AuxInt = 1 35832 return true 35833 } 35834 return false 35835 } 35836 func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool { 35837 b := v.Block 35838 _ = b 35839 // match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem) 35840 // cond: 35841 // result: (SETBEmem [off] {sym} ptr x mem) 35842 for { 35843 off := v.AuxInt 35844 sym := v.Aux 35845 _ = v.Args[2] 35846 ptr := v.Args[0] 35847 v_1 := v.Args[1] 35848 if v_1.Op != OpAMD64InvertFlags { 35849 break 35850 } 35851 x := v_1.Args[0] 35852 mem := v.Args[2] 35853 v.reset(OpAMD64SETBEmem) 35854 v.AuxInt = off 35855 v.Aux = sym 35856 v.AddArg(ptr) 35857 v.AddArg(x) 35858 v.AddArg(mem) 35859 return true 35860 } 35861 // match: (SETAEmem [off] {sym} ptr x:(FlagEQ) mem) 35862 // cond: 35863 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 35864 for { 35865 off := v.AuxInt 35866 sym := v.Aux 35867 _ = v.Args[2] 35868 ptr := v.Args[0] 35869 x := v.Args[1] 35870 if x.Op != OpAMD64FlagEQ { 35871 break 35872 } 35873 mem := v.Args[2] 35874 v.reset(OpAMD64MOVBstore) 35875 v.AuxInt = off 35876 v.Aux = sym 35877 v.AddArg(ptr) 35878 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 35879 v0.AuxInt = 1 35880 v.AddArg(v0) 35881 v.AddArg(mem) 35882 return true 35883 } 35884 // match: (SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem) 35885 // cond: 35886 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 35887 for { 35888 off := v.AuxInt 35889 sym := v.Aux 35890 _ = v.Args[2] 35891 ptr := v.Args[0] 35892 x := v.Args[1] 35893 if x.Op != OpAMD64FlagLT_ULT { 35894 break 35895 } 35896 mem := v.Args[2] 35897 v.reset(OpAMD64MOVBstore) 35898 v.AuxInt = off 35899 v.Aux = sym 35900 v.AddArg(ptr) 35901 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 35902 v0.AuxInt = 0 35903 v.AddArg(v0) 35904 v.AddArg(mem) 35905 return true 35906 } 35907 // match: (SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem) 35908 // cond: 35909 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 35910 for { 35911 off := v.AuxInt 35912 sym := v.Aux 35913 _ = v.Args[2] 35914 ptr := v.Args[0] 35915 x := v.Args[1] 35916 if x.Op != OpAMD64FlagLT_UGT { 35917 break 35918 } 35919 mem := v.Args[2] 35920 v.reset(OpAMD64MOVBstore) 35921 v.AuxInt = off 35922 v.Aux = sym 35923 v.AddArg(ptr) 35924 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 35925 v0.AuxInt = 1 35926 v.AddArg(v0) 35927 v.AddArg(mem) 35928 return true 35929 } 35930 // match: (SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem) 35931 // cond: 35932 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 35933 for { 35934 off := v.AuxInt 35935 sym := v.Aux 35936 _ = v.Args[2] 35937 ptr := v.Args[0] 35938 x := v.Args[1] 35939 if x.Op != OpAMD64FlagGT_ULT { 35940 break 35941 } 35942 mem := v.Args[2] 35943 v.reset(OpAMD64MOVBstore) 35944 v.AuxInt = off 35945 v.Aux = sym 35946 v.AddArg(ptr) 35947 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 35948 v0.AuxInt = 0 35949 v.AddArg(v0) 35950 v.AddArg(mem) 35951 return true 35952 } 35953 // match: (SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem) 35954 // cond: 35955 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 35956 for { 35957 off := v.AuxInt 35958 sym := v.Aux 35959 _ = v.Args[2] 35960 ptr := v.Args[0] 35961 x := v.Args[1] 35962 if x.Op != OpAMD64FlagGT_UGT { 35963 break 35964 } 35965 mem := v.Args[2] 35966 v.reset(OpAMD64MOVBstore) 35967 v.AuxInt = off 
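// Carry the original store's offset and symbol over to the replacement
// MOVBstore so it writes to the same address.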
35968 v.Aux = sym 35969 v.AddArg(ptr) 35970 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 35971 v0.AuxInt = 1 35972 v.AddArg(v0) 35973 v.AddArg(mem) 35974 return true 35975 } 35976 return false 35977 } 35978 func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool { 35979 b := v.Block 35980 _ = b 35981 // match: (SETAmem [off] {sym} ptr (InvertFlags x) mem) 35982 // cond: 35983 // result: (SETBmem [off] {sym} ptr x mem) 35984 for { 35985 off := v.AuxInt 35986 sym := v.Aux 35987 _ = v.Args[2] 35988 ptr := v.Args[0] 35989 v_1 := v.Args[1] 35990 if v_1.Op != OpAMD64InvertFlags { 35991 break 35992 } 35993 x := v_1.Args[0] 35994 mem := v.Args[2] 35995 v.reset(OpAMD64SETBmem) 35996 v.AuxInt = off 35997 v.Aux = sym 35998 v.AddArg(ptr) 35999 v.AddArg(x) 36000 v.AddArg(mem) 36001 return true 36002 } 36003 // match: (SETAmem [off] {sym} ptr x:(FlagEQ) mem) 36004 // cond: 36005 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 36006 for { 36007 off := v.AuxInt 36008 sym := v.Aux 36009 _ = v.Args[2] 36010 ptr := v.Args[0] 36011 x := v.Args[1] 36012 if x.Op != OpAMD64FlagEQ { 36013 break 36014 } 36015 mem := v.Args[2] 36016 v.reset(OpAMD64MOVBstore) 36017 v.AuxInt = off 36018 v.Aux = sym 36019 v.AddArg(ptr) 36020 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36021 v0.AuxInt = 0 36022 v.AddArg(v0) 36023 v.AddArg(mem) 36024 return true 36025 } 36026 // match: (SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem) 36027 // cond: 36028 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 36029 for { 36030 off := v.AuxInt 36031 sym := v.Aux 36032 _ = v.Args[2] 36033 ptr := v.Args[0] 36034 x := v.Args[1] 36035 if x.Op != OpAMD64FlagLT_ULT { 36036 break 36037 } 36038 mem := v.Args[2] 36039 v.reset(OpAMD64MOVBstore) 36040 v.AuxInt = off 36041 v.Aux = sym 36042 v.AddArg(ptr) 36043 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36044 v0.AuxInt = 0 36045 v.AddArg(v0) 36046 v.AddArg(mem) 36047 return true 36048 } 36049 // match: (SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem) 36050 // cond: 36051 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 36052 for { 36053 off := v.AuxInt 36054 sym := v.Aux 36055 _ = v.Args[2] 36056 ptr := v.Args[0] 36057 x := v.Args[1] 36058 if x.Op != OpAMD64FlagLT_UGT { 36059 break 36060 } 36061 mem := v.Args[2] 36062 v.reset(OpAMD64MOVBstore) 36063 v.AuxInt = off 36064 v.Aux = sym 36065 v.AddArg(ptr) 36066 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36067 v0.AuxInt = 1 36068 v.AddArg(v0) 36069 v.AddArg(mem) 36070 return true 36071 } 36072 // match: (SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem) 36073 // cond: 36074 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 36075 for { 36076 off := v.AuxInt 36077 sym := v.Aux 36078 _ = v.Args[2] 36079 ptr := v.Args[0] 36080 x := v.Args[1] 36081 if x.Op != OpAMD64FlagGT_ULT { 36082 break 36083 } 36084 mem := v.Args[2] 36085 v.reset(OpAMD64MOVBstore) 36086 v.AuxInt = off 36087 v.Aux = sym 36088 v.AddArg(ptr) 36089 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36090 v0.AuxInt = 0 36091 v.AddArg(v0) 36092 v.AddArg(mem) 36093 return true 36094 } 36095 // match: (SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem) 36096 // cond: 36097 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 36098 for { 36099 off := v.AuxInt 36100 sym := v.Aux 36101 _ = v.Args[2] 36102 ptr := v.Args[0] 36103 x := v.Args[1] 36104 if x.Op != OpAMD64FlagGT_UGT { 36105 break 36106 } 36107 mem := v.Args[2] 36108 v.reset(OpAMD64MOVBstore) 36109 v.AuxInt = off 36110 v.Aux = sym 36111 v.AddArg(ptr) 
36112 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36113 v0.AuxInt = 1 36114 v.AddArg(v0) 36115 v.AddArg(mem) 36116 return true 36117 } 36118 return false 36119 } 36120 func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { 36121 // match: (SETB (InvertFlags x)) 36122 // cond: 36123 // result: (SETA x) 36124 for { 36125 v_0 := v.Args[0] 36126 if v_0.Op != OpAMD64InvertFlags { 36127 break 36128 } 36129 x := v_0.Args[0] 36130 v.reset(OpAMD64SETA) 36131 v.AddArg(x) 36132 return true 36133 } 36134 // match: (SETB (FlagEQ)) 36135 // cond: 36136 // result: (MOVLconst [0]) 36137 for { 36138 v_0 := v.Args[0] 36139 if v_0.Op != OpAMD64FlagEQ { 36140 break 36141 } 36142 v.reset(OpAMD64MOVLconst) 36143 v.AuxInt = 0 36144 return true 36145 } 36146 // match: (SETB (FlagLT_ULT)) 36147 // cond: 36148 // result: (MOVLconst [1]) 36149 for { 36150 v_0 := v.Args[0] 36151 if v_0.Op != OpAMD64FlagLT_ULT { 36152 break 36153 } 36154 v.reset(OpAMD64MOVLconst) 36155 v.AuxInt = 1 36156 return true 36157 } 36158 // match: (SETB (FlagLT_UGT)) 36159 // cond: 36160 // result: (MOVLconst [0]) 36161 for { 36162 v_0 := v.Args[0] 36163 if v_0.Op != OpAMD64FlagLT_UGT { 36164 break 36165 } 36166 v.reset(OpAMD64MOVLconst) 36167 v.AuxInt = 0 36168 return true 36169 } 36170 // match: (SETB (FlagGT_ULT)) 36171 // cond: 36172 // result: (MOVLconst [1]) 36173 for { 36174 v_0 := v.Args[0] 36175 if v_0.Op != OpAMD64FlagGT_ULT { 36176 break 36177 } 36178 v.reset(OpAMD64MOVLconst) 36179 v.AuxInt = 1 36180 return true 36181 } 36182 // match: (SETB (FlagGT_UGT)) 36183 // cond: 36184 // result: (MOVLconst [0]) 36185 for { 36186 v_0 := v.Args[0] 36187 if v_0.Op != OpAMD64FlagGT_UGT { 36188 break 36189 } 36190 v.reset(OpAMD64MOVLconst) 36191 v.AuxInt = 0 36192 return true 36193 } 36194 return false 36195 } 36196 func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { 36197 // match: (SETBE (InvertFlags x)) 36198 // cond: 36199 // result: (SETAE x) 36200 for { 36201 v_0 := v.Args[0] 36202 if v_0.Op != OpAMD64InvertFlags { 36203 break 36204 } 36205 x := v_0.Args[0] 36206 v.reset(OpAMD64SETAE) 36207 v.AddArg(x) 36208 return true 36209 } 36210 // match: (SETBE (FlagEQ)) 36211 // cond: 36212 // result: (MOVLconst [1]) 36213 for { 36214 v_0 := v.Args[0] 36215 if v_0.Op != OpAMD64FlagEQ { 36216 break 36217 } 36218 v.reset(OpAMD64MOVLconst) 36219 v.AuxInt = 1 36220 return true 36221 } 36222 // match: (SETBE (FlagLT_ULT)) 36223 // cond: 36224 // result: (MOVLconst [1]) 36225 for { 36226 v_0 := v.Args[0] 36227 if v_0.Op != OpAMD64FlagLT_ULT { 36228 break 36229 } 36230 v.reset(OpAMD64MOVLconst) 36231 v.AuxInt = 1 36232 return true 36233 } 36234 // match: (SETBE (FlagLT_UGT)) 36235 // cond: 36236 // result: (MOVLconst [0]) 36237 for { 36238 v_0 := v.Args[0] 36239 if v_0.Op != OpAMD64FlagLT_UGT { 36240 break 36241 } 36242 v.reset(OpAMD64MOVLconst) 36243 v.AuxInt = 0 36244 return true 36245 } 36246 // match: (SETBE (FlagGT_ULT)) 36247 // cond: 36248 // result: (MOVLconst [1]) 36249 for { 36250 v_0 := v.Args[0] 36251 if v_0.Op != OpAMD64FlagGT_ULT { 36252 break 36253 } 36254 v.reset(OpAMD64MOVLconst) 36255 v.AuxInt = 1 36256 return true 36257 } 36258 // match: (SETBE (FlagGT_UGT)) 36259 // cond: 36260 // result: (MOVLconst [0]) 36261 for { 36262 v_0 := v.Args[0] 36263 if v_0.Op != OpAMD64FlagGT_UGT { 36264 break 36265 } 36266 v.reset(OpAMD64MOVLconst) 36267 v.AuxInt = 0 36268 return true 36269 } 36270 return false 36271 } 36272 func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool { 36273 b := v.Block 36274 _ = b 36275 // match: (SETBEmem [off] 
{sym} ptr (InvertFlags x) mem) 36276 // cond: 36277 // result: (SETAEmem [off] {sym} ptr x mem) 36278 for { 36279 off := v.AuxInt 36280 sym := v.Aux 36281 _ = v.Args[2] 36282 ptr := v.Args[0] 36283 v_1 := v.Args[1] 36284 if v_1.Op != OpAMD64InvertFlags { 36285 break 36286 } 36287 x := v_1.Args[0] 36288 mem := v.Args[2] 36289 v.reset(OpAMD64SETAEmem) 36290 v.AuxInt = off 36291 v.Aux = sym 36292 v.AddArg(ptr) 36293 v.AddArg(x) 36294 v.AddArg(mem) 36295 return true 36296 } 36297 // match: (SETBEmem [off] {sym} ptr x:(FlagEQ) mem) 36298 // cond: 36299 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 36300 for { 36301 off := v.AuxInt 36302 sym := v.Aux 36303 _ = v.Args[2] 36304 ptr := v.Args[0] 36305 x := v.Args[1] 36306 if x.Op != OpAMD64FlagEQ { 36307 break 36308 } 36309 mem := v.Args[2] 36310 v.reset(OpAMD64MOVBstore) 36311 v.AuxInt = off 36312 v.Aux = sym 36313 v.AddArg(ptr) 36314 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36315 v0.AuxInt = 1 36316 v.AddArg(v0) 36317 v.AddArg(mem) 36318 return true 36319 } 36320 // match: (SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem) 36321 // cond: 36322 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 36323 for { 36324 off := v.AuxInt 36325 sym := v.Aux 36326 _ = v.Args[2] 36327 ptr := v.Args[0] 36328 x := v.Args[1] 36329 if x.Op != OpAMD64FlagLT_ULT { 36330 break 36331 } 36332 mem := v.Args[2] 36333 v.reset(OpAMD64MOVBstore) 36334 v.AuxInt = off 36335 v.Aux = sym 36336 v.AddArg(ptr) 36337 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36338 v0.AuxInt = 1 36339 v.AddArg(v0) 36340 v.AddArg(mem) 36341 return true 36342 } 36343 // match: (SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem) 36344 // cond: 36345 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 36346 for { 36347 off := v.AuxInt 36348 sym := v.Aux 36349 _ = v.Args[2] 36350 ptr := v.Args[0] 36351 x := v.Args[1] 36352 if x.Op != OpAMD64FlagLT_UGT { 36353 break 36354 } 36355 mem := v.Args[2] 36356 v.reset(OpAMD64MOVBstore) 36357 v.AuxInt = off 36358 v.Aux = sym 36359 v.AddArg(ptr) 36360 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36361 v0.AuxInt = 0 36362 v.AddArg(v0) 36363 v.AddArg(mem) 36364 return true 36365 } 36366 // match: (SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem) 36367 // cond: 36368 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 36369 for { 36370 off := v.AuxInt 36371 sym := v.Aux 36372 _ = v.Args[2] 36373 ptr := v.Args[0] 36374 x := v.Args[1] 36375 if x.Op != OpAMD64FlagGT_ULT { 36376 break 36377 } 36378 mem := v.Args[2] 36379 v.reset(OpAMD64MOVBstore) 36380 v.AuxInt = off 36381 v.Aux = sym 36382 v.AddArg(ptr) 36383 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36384 v0.AuxInt = 1 36385 v.AddArg(v0) 36386 v.AddArg(mem) 36387 return true 36388 } 36389 // match: (SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem) 36390 // cond: 36391 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 36392 for { 36393 off := v.AuxInt 36394 sym := v.Aux 36395 _ = v.Args[2] 36396 ptr := v.Args[0] 36397 x := v.Args[1] 36398 if x.Op != OpAMD64FlagGT_UGT { 36399 break 36400 } 36401 mem := v.Args[2] 36402 v.reset(OpAMD64MOVBstore) 36403 v.AuxInt = off 36404 v.Aux = sym 36405 v.AddArg(ptr) 36406 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36407 v0.AuxInt = 0 36408 v.AddArg(v0) 36409 v.AddArg(mem) 36410 return true 36411 } 36412 return false 36413 } 36414 func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool { 36415 b := v.Block 36416 _ = b 36417 // match: (SETBmem [off] {sym} ptr (InvertFlags x) mem) 
36418 // cond: 36419 // result: (SETAmem [off] {sym} ptr x mem) 36420 for { 36421 off := v.AuxInt 36422 sym := v.Aux 36423 _ = v.Args[2] 36424 ptr := v.Args[0] 36425 v_1 := v.Args[1] 36426 if v_1.Op != OpAMD64InvertFlags { 36427 break 36428 } 36429 x := v_1.Args[0] 36430 mem := v.Args[2] 36431 v.reset(OpAMD64SETAmem) 36432 v.AuxInt = off 36433 v.Aux = sym 36434 v.AddArg(ptr) 36435 v.AddArg(x) 36436 v.AddArg(mem) 36437 return true 36438 } 36439 // match: (SETBmem [off] {sym} ptr x:(FlagEQ) mem) 36440 // cond: 36441 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 36442 for { 36443 off := v.AuxInt 36444 sym := v.Aux 36445 _ = v.Args[2] 36446 ptr := v.Args[0] 36447 x := v.Args[1] 36448 if x.Op != OpAMD64FlagEQ { 36449 break 36450 } 36451 mem := v.Args[2] 36452 v.reset(OpAMD64MOVBstore) 36453 v.AuxInt = off 36454 v.Aux = sym 36455 v.AddArg(ptr) 36456 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36457 v0.AuxInt = 0 36458 v.AddArg(v0) 36459 v.AddArg(mem) 36460 return true 36461 } 36462 // match: (SETBmem [off] {sym} ptr x:(FlagLT_ULT) mem) 36463 // cond: 36464 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 36465 for { 36466 off := v.AuxInt 36467 sym := v.Aux 36468 _ = v.Args[2] 36469 ptr := v.Args[0] 36470 x := v.Args[1] 36471 if x.Op != OpAMD64FlagLT_ULT { 36472 break 36473 } 36474 mem := v.Args[2] 36475 v.reset(OpAMD64MOVBstore) 36476 v.AuxInt = off 36477 v.Aux = sym 36478 v.AddArg(ptr) 36479 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36480 v0.AuxInt = 1 36481 v.AddArg(v0) 36482 v.AddArg(mem) 36483 return true 36484 } 36485 // match: (SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem) 36486 // cond: 36487 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 36488 for { 36489 off := v.AuxInt 36490 sym := v.Aux 36491 _ = v.Args[2] 36492 ptr := v.Args[0] 36493 x := v.Args[1] 36494 if x.Op != OpAMD64FlagLT_UGT { 36495 break 36496 } 36497 mem := v.Args[2] 36498 v.reset(OpAMD64MOVBstore) 36499 v.AuxInt = off 36500 v.Aux = sym 36501 v.AddArg(ptr) 36502 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36503 v0.AuxInt = 0 36504 v.AddArg(v0) 36505 v.AddArg(mem) 36506 return true 36507 } 36508 // match: (SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem) 36509 // cond: 36510 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 36511 for { 36512 off := v.AuxInt 36513 sym := v.Aux 36514 _ = v.Args[2] 36515 ptr := v.Args[0] 36516 x := v.Args[1] 36517 if x.Op != OpAMD64FlagGT_ULT { 36518 break 36519 } 36520 mem := v.Args[2] 36521 v.reset(OpAMD64MOVBstore) 36522 v.AuxInt = off 36523 v.Aux = sym 36524 v.AddArg(ptr) 36525 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36526 v0.AuxInt = 1 36527 v.AddArg(v0) 36528 v.AddArg(mem) 36529 return true 36530 } 36531 // match: (SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem) 36532 // cond: 36533 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 36534 for { 36535 off := v.AuxInt 36536 sym := v.Aux 36537 _ = v.Args[2] 36538 ptr := v.Args[0] 36539 x := v.Args[1] 36540 if x.Op != OpAMD64FlagGT_UGT { 36541 break 36542 } 36543 mem := v.Args[2] 36544 v.reset(OpAMD64MOVBstore) 36545 v.AuxInt = off 36546 v.Aux = sym 36547 v.AddArg(ptr) 36548 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 36549 v0.AuxInt = 0 36550 v.AddArg(v0) 36551 v.AddArg(mem) 36552 return true 36553 } 36554 return false 36555 } 36556 func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { 36557 b := v.Block 36558 _ = b 36559 config := b.Func.Config 36560 _ = config 36561 // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) 
y)) 36562 // cond: !config.nacl 36563 // result: (SETAE (BTL x y)) 36564 for { 36565 v_0 := v.Args[0] 36566 if v_0.Op != OpAMD64TESTL { 36567 break 36568 } 36569 _ = v_0.Args[1] 36570 v_0_0 := v_0.Args[0] 36571 if v_0_0.Op != OpAMD64SHLL { 36572 break 36573 } 36574 _ = v_0_0.Args[1] 36575 v_0_0_0 := v_0_0.Args[0] 36576 if v_0_0_0.Op != OpAMD64MOVLconst { 36577 break 36578 } 36579 if v_0_0_0.AuxInt != 1 { 36580 break 36581 } 36582 x := v_0_0.Args[1] 36583 y := v_0.Args[1] 36584 if !(!config.nacl) { 36585 break 36586 } 36587 v.reset(OpAMD64SETAE) 36588 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 36589 v0.AddArg(x) 36590 v0.AddArg(y) 36591 v.AddArg(v0) 36592 return true 36593 } 36594 // match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x))) 36595 // cond: !config.nacl 36596 // result: (SETAE (BTL x y)) 36597 for { 36598 v_0 := v.Args[0] 36599 if v_0.Op != OpAMD64TESTL { 36600 break 36601 } 36602 _ = v_0.Args[1] 36603 y := v_0.Args[0] 36604 v_0_1 := v_0.Args[1] 36605 if v_0_1.Op != OpAMD64SHLL { 36606 break 36607 } 36608 _ = v_0_1.Args[1] 36609 v_0_1_0 := v_0_1.Args[0] 36610 if v_0_1_0.Op != OpAMD64MOVLconst { 36611 break 36612 } 36613 if v_0_1_0.AuxInt != 1 { 36614 break 36615 } 36616 x := v_0_1.Args[1] 36617 if !(!config.nacl) { 36618 break 36619 } 36620 v.reset(OpAMD64SETAE) 36621 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 36622 v0.AddArg(x) 36623 v0.AddArg(y) 36624 v.AddArg(v0) 36625 return true 36626 } 36627 // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 36628 // cond: !config.nacl 36629 // result: (SETAE (BTQ x y)) 36630 for { 36631 v_0 := v.Args[0] 36632 if v_0.Op != OpAMD64TESTQ { 36633 break 36634 } 36635 _ = v_0.Args[1] 36636 v_0_0 := v_0.Args[0] 36637 if v_0_0.Op != OpAMD64SHLQ { 36638 break 36639 } 36640 _ = v_0_0.Args[1] 36641 v_0_0_0 := v_0_0.Args[0] 36642 if v_0_0_0.Op != OpAMD64MOVQconst { 36643 break 36644 } 36645 if v_0_0_0.AuxInt != 1 { 36646 break 36647 } 36648 x := v_0_0.Args[1] 36649 y := v_0.Args[1] 36650 if !(!config.nacl) { 36651 break 36652 } 36653 v.reset(OpAMD64SETAE) 36654 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 36655 v0.AddArg(x) 36656 v0.AddArg(y) 36657 v.AddArg(v0) 36658 return true 36659 } 36660 // match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 36661 // cond: !config.nacl 36662 // result: (SETAE (BTQ x y)) 36663 for { 36664 v_0 := v.Args[0] 36665 if v_0.Op != OpAMD64TESTQ { 36666 break 36667 } 36668 _ = v_0.Args[1] 36669 y := v_0.Args[0] 36670 v_0_1 := v_0.Args[1] 36671 if v_0_1.Op != OpAMD64SHLQ { 36672 break 36673 } 36674 _ = v_0_1.Args[1] 36675 v_0_1_0 := v_0_1.Args[0] 36676 if v_0_1_0.Op != OpAMD64MOVQconst { 36677 break 36678 } 36679 if v_0_1_0.AuxInt != 1 { 36680 break 36681 } 36682 x := v_0_1.Args[1] 36683 if !(!config.nacl) { 36684 break 36685 } 36686 v.reset(OpAMD64SETAE) 36687 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 36688 v0.AddArg(x) 36689 v0.AddArg(y) 36690 v.AddArg(v0) 36691 return true 36692 } 36693 // match: (SETEQ (TESTLconst [c] x)) 36694 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 36695 // result: (SETAE (BTLconst [log2(c)] x)) 36696 for { 36697 v_0 := v.Args[0] 36698 if v_0.Op != OpAMD64TESTLconst { 36699 break 36700 } 36701 c := v_0.AuxInt 36702 x := v_0.Args[0] 36703 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 36704 break 36705 } 36706 v.reset(OpAMD64SETAE) 36707 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 36708 v0.AuxInt = log2(c) 36709 v0.AddArg(x) 36710 v.AddArg(v0) 36711 return true 36712 } 36713 // match: (SETEQ (TESTQconst [c] x)) 36714 // 
cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 36715 // result: (SETAE (BTQconst [log2(c)] x)) 36716 for { 36717 v_0 := v.Args[0] 36718 if v_0.Op != OpAMD64TESTQconst { 36719 break 36720 } 36721 c := v_0.AuxInt 36722 x := v_0.Args[0] 36723 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 36724 break 36725 } 36726 v.reset(OpAMD64SETAE) 36727 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 36728 v0.AuxInt = log2(c) 36729 v0.AddArg(x) 36730 v.AddArg(v0) 36731 return true 36732 } 36733 // match: (SETEQ (TESTQ (MOVQconst [c]) x)) 36734 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 36735 // result: (SETAE (BTQconst [log2(c)] x)) 36736 for { 36737 v_0 := v.Args[0] 36738 if v_0.Op != OpAMD64TESTQ { 36739 break 36740 } 36741 _ = v_0.Args[1] 36742 v_0_0 := v_0.Args[0] 36743 if v_0_0.Op != OpAMD64MOVQconst { 36744 break 36745 } 36746 c := v_0_0.AuxInt 36747 x := v_0.Args[1] 36748 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 36749 break 36750 } 36751 v.reset(OpAMD64SETAE) 36752 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 36753 v0.AuxInt = log2(c) 36754 v0.AddArg(x) 36755 v.AddArg(v0) 36756 return true 36757 } 36758 // match: (SETEQ (TESTQ x (MOVQconst [c]))) 36759 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 36760 // result: (SETAE (BTQconst [log2(c)] x)) 36761 for { 36762 v_0 := v.Args[0] 36763 if v_0.Op != OpAMD64TESTQ { 36764 break 36765 } 36766 _ = v_0.Args[1] 36767 x := v_0.Args[0] 36768 v_0_1 := v_0.Args[1] 36769 if v_0_1.Op != OpAMD64MOVQconst { 36770 break 36771 } 36772 c := v_0_1.AuxInt 36773 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 36774 break 36775 } 36776 v.reset(OpAMD64SETAE) 36777 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 36778 v0.AuxInt = log2(c) 36779 v0.AddArg(x) 36780 v.AddArg(v0) 36781 return true 36782 } 36783 // match: (SETEQ (InvertFlags x)) 36784 // cond: 36785 // result: (SETEQ x) 36786 for { 36787 v_0 := v.Args[0] 36788 if v_0.Op != OpAMD64InvertFlags { 36789 break 36790 } 36791 x := v_0.Args[0] 36792 v.reset(OpAMD64SETEQ) 36793 v.AddArg(x) 36794 return true 36795 } 36796 // match: (SETEQ (FlagEQ)) 36797 // cond: 36798 // result: (MOVLconst [1]) 36799 for { 36800 v_0 := v.Args[0] 36801 if v_0.Op != OpAMD64FlagEQ { 36802 break 36803 } 36804 v.reset(OpAMD64MOVLconst) 36805 v.AuxInt = 1 36806 return true 36807 } 36808 return false 36809 } 36810 func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { 36811 // match: (SETEQ (FlagLT_ULT)) 36812 // cond: 36813 // result: (MOVLconst [0]) 36814 for { 36815 v_0 := v.Args[0] 36816 if v_0.Op != OpAMD64FlagLT_ULT { 36817 break 36818 } 36819 v.reset(OpAMD64MOVLconst) 36820 v.AuxInt = 0 36821 return true 36822 } 36823 // match: (SETEQ (FlagLT_UGT)) 36824 // cond: 36825 // result: (MOVLconst [0]) 36826 for { 36827 v_0 := v.Args[0] 36828 if v_0.Op != OpAMD64FlagLT_UGT { 36829 break 36830 } 36831 v.reset(OpAMD64MOVLconst) 36832 v.AuxInt = 0 36833 return true 36834 } 36835 // match: (SETEQ (FlagGT_ULT)) 36836 // cond: 36837 // result: (MOVLconst [0]) 36838 for { 36839 v_0 := v.Args[0] 36840 if v_0.Op != OpAMD64FlagGT_ULT { 36841 break 36842 } 36843 v.reset(OpAMD64MOVLconst) 36844 v.AuxInt = 0 36845 return true 36846 } 36847 // match: (SETEQ (FlagGT_UGT)) 36848 // cond: 36849 // result: (MOVLconst [0]) 36850 for { 36851 v_0 := v.Args[0] 36852 if v_0.Op != OpAMD64FlagGT_UGT { 36853 break 36854 } 36855 v.reset(OpAMD64MOVLconst) 36856 v.AuxInt = 0 36857 return true 36858 } 36859 return false 36860 } 36861 func 
rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool { 36862 b := v.Block 36863 _ = b 36864 config := b.Func.Config 36865 _ = config 36866 // match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) 36867 // cond: !config.nacl 36868 // result: (SETAEmem [off] {sym} ptr (BTL x y) mem) 36869 for { 36870 off := v.AuxInt 36871 sym := v.Aux 36872 _ = v.Args[2] 36873 ptr := v.Args[0] 36874 v_1 := v.Args[1] 36875 if v_1.Op != OpAMD64TESTL { 36876 break 36877 } 36878 _ = v_1.Args[1] 36879 v_1_0 := v_1.Args[0] 36880 if v_1_0.Op != OpAMD64SHLL { 36881 break 36882 } 36883 _ = v_1_0.Args[1] 36884 v_1_0_0 := v_1_0.Args[0] 36885 if v_1_0_0.Op != OpAMD64MOVLconst { 36886 break 36887 } 36888 if v_1_0_0.AuxInt != 1 { 36889 break 36890 } 36891 x := v_1_0.Args[1] 36892 y := v_1.Args[1] 36893 mem := v.Args[2] 36894 if !(!config.nacl) { 36895 break 36896 } 36897 v.reset(OpAMD64SETAEmem) 36898 v.AuxInt = off 36899 v.Aux = sym 36900 v.AddArg(ptr) 36901 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 36902 v0.AddArg(x) 36903 v0.AddArg(y) 36904 v.AddArg(v0) 36905 v.AddArg(mem) 36906 return true 36907 } 36908 // match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) 36909 // cond: !config.nacl 36910 // result: (SETAEmem [off] {sym} ptr (BTL x y) mem) 36911 for { 36912 off := v.AuxInt 36913 sym := v.Aux 36914 _ = v.Args[2] 36915 ptr := v.Args[0] 36916 v_1 := v.Args[1] 36917 if v_1.Op != OpAMD64TESTL { 36918 break 36919 } 36920 _ = v_1.Args[1] 36921 y := v_1.Args[0] 36922 v_1_1 := v_1.Args[1] 36923 if v_1_1.Op != OpAMD64SHLL { 36924 break 36925 } 36926 _ = v_1_1.Args[1] 36927 v_1_1_0 := v_1_1.Args[0] 36928 if v_1_1_0.Op != OpAMD64MOVLconst { 36929 break 36930 } 36931 if v_1_1_0.AuxInt != 1 { 36932 break 36933 } 36934 x := v_1_1.Args[1] 36935 mem := v.Args[2] 36936 if !(!config.nacl) { 36937 break 36938 } 36939 v.reset(OpAMD64SETAEmem) 36940 v.AuxInt = off 36941 v.Aux = sym 36942 v.AddArg(ptr) 36943 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 36944 v0.AddArg(x) 36945 v0.AddArg(y) 36946 v.AddArg(v0) 36947 v.AddArg(mem) 36948 return true 36949 } 36950 // match: (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) 36951 // cond: !config.nacl 36952 // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem) 36953 for { 36954 off := v.AuxInt 36955 sym := v.Aux 36956 _ = v.Args[2] 36957 ptr := v.Args[0] 36958 v_1 := v.Args[1] 36959 if v_1.Op != OpAMD64TESTQ { 36960 break 36961 } 36962 _ = v_1.Args[1] 36963 v_1_0 := v_1.Args[0] 36964 if v_1_0.Op != OpAMD64SHLQ { 36965 break 36966 } 36967 _ = v_1_0.Args[1] 36968 v_1_0_0 := v_1_0.Args[0] 36969 if v_1_0_0.Op != OpAMD64MOVQconst { 36970 break 36971 } 36972 if v_1_0_0.AuxInt != 1 { 36973 break 36974 } 36975 x := v_1_0.Args[1] 36976 y := v_1.Args[1] 36977 mem := v.Args[2] 36978 if !(!config.nacl) { 36979 break 36980 } 36981 v.reset(OpAMD64SETAEmem) 36982 v.AuxInt = off 36983 v.Aux = sym 36984 v.AddArg(ptr) 36985 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 36986 v0.AddArg(x) 36987 v0.AddArg(y) 36988 v.AddArg(v0) 36989 v.AddArg(mem) 36990 return true 36991 } 36992 // match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) 36993 // cond: !config.nacl 36994 // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem) 36995 for { 36996 off := v.AuxInt 36997 sym := v.Aux 36998 _ = v.Args[2] 36999 ptr := v.Args[0] 37000 v_1 := v.Args[1] 37001 if v_1.Op != OpAMD64TESTQ { 37002 break 37003 } 37004 _ = v_1.Args[1] 37005 y := v_1.Args[0] 37006 v_1_1 := v_1.Args[1] 37007 if v_1_1.Op != OpAMD64SHLQ { 37008 break 37009 } 
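// The blank read of the highest-indexed arg touches the end of the Args
// slice first, so the compiler can elide bounds checks on the later
// Args accesses in this rule body.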
37010 _ = v_1_1.Args[1] 37011 v_1_1_0 := v_1_1.Args[0] 37012 if v_1_1_0.Op != OpAMD64MOVQconst { 37013 break 37014 } 37015 if v_1_1_0.AuxInt != 1 { 37016 break 37017 } 37018 x := v_1_1.Args[1] 37019 mem := v.Args[2] 37020 if !(!config.nacl) { 37021 break 37022 } 37023 v.reset(OpAMD64SETAEmem) 37024 v.AuxInt = off 37025 v.Aux = sym 37026 v.AddArg(ptr) 37027 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 37028 v0.AddArg(x) 37029 v0.AddArg(y) 37030 v.AddArg(v0) 37031 v.AddArg(mem) 37032 return true 37033 } 37034 // match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem) 37035 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 37036 // result: (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) 37037 for { 37038 off := v.AuxInt 37039 sym := v.Aux 37040 _ = v.Args[2] 37041 ptr := v.Args[0] 37042 v_1 := v.Args[1] 37043 if v_1.Op != OpAMD64TESTLconst { 37044 break 37045 } 37046 c := v_1.AuxInt 37047 x := v_1.Args[0] 37048 mem := v.Args[2] 37049 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 37050 break 37051 } 37052 v.reset(OpAMD64SETAEmem) 37053 v.AuxInt = off 37054 v.Aux = sym 37055 v.AddArg(ptr) 37056 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 37057 v0.AuxInt = log2(c) 37058 v0.AddArg(x) 37059 v.AddArg(v0) 37060 v.AddArg(mem) 37061 return true 37062 } 37063 // match: (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem) 37064 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 37065 // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) 37066 for { 37067 off := v.AuxInt 37068 sym := v.Aux 37069 _ = v.Args[2] 37070 ptr := v.Args[0] 37071 v_1 := v.Args[1] 37072 if v_1.Op != OpAMD64TESTQconst { 37073 break 37074 } 37075 c := v_1.AuxInt 37076 x := v_1.Args[0] 37077 mem := v.Args[2] 37078 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 37079 break 37080 } 37081 v.reset(OpAMD64SETAEmem) 37082 v.AuxInt = off 37083 v.Aux = sym 37084 v.AddArg(ptr) 37085 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 37086 v0.AuxInt = log2(c) 37087 v0.AddArg(x) 37088 v.AddArg(v0) 37089 v.AddArg(mem) 37090 return true 37091 } 37092 // match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) 37093 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 37094 // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) 37095 for { 37096 off := v.AuxInt 37097 sym := v.Aux 37098 _ = v.Args[2] 37099 ptr := v.Args[0] 37100 v_1 := v.Args[1] 37101 if v_1.Op != OpAMD64TESTQ { 37102 break 37103 } 37104 _ = v_1.Args[1] 37105 v_1_0 := v_1.Args[0] 37106 if v_1_0.Op != OpAMD64MOVQconst { 37107 break 37108 } 37109 c := v_1_0.AuxInt 37110 x := v_1.Args[1] 37111 mem := v.Args[2] 37112 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 37113 break 37114 } 37115 v.reset(OpAMD64SETAEmem) 37116 v.AuxInt = off 37117 v.Aux = sym 37118 v.AddArg(ptr) 37119 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 37120 v0.AuxInt = log2(c) 37121 v0.AddArg(x) 37122 v.AddArg(v0) 37123 v.AddArg(mem) 37124 return true 37125 } 37126 // match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) 37127 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 37128 // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) 37129 for { 37130 off := v.AuxInt 37131 sym := v.Aux 37132 _ = v.Args[2] 37133 ptr := v.Args[0] 37134 v_1 := v.Args[1] 37135 if v_1.Op != OpAMD64TESTQ { 37136 break 37137 } 37138 _ = v_1.Args[1] 37139 x := v_1.Args[0] 37140 v_1_1 := v_1.Args[1] 37141 if v_1_1.Op != OpAMD64MOVQconst { 37142 break 37143 } 37144 c := v_1_1.AuxInt 37145 mem 
:= v.Args[2] 37146 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 37147 break 37148 } 37149 v.reset(OpAMD64SETAEmem) 37150 v.AuxInt = off 37151 v.Aux = sym 37152 v.AddArg(ptr) 37153 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 37154 v0.AuxInt = log2(c) 37155 v0.AddArg(x) 37156 v.AddArg(v0) 37157 v.AddArg(mem) 37158 return true 37159 } 37160 // match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem) 37161 // cond: 37162 // result: (SETEQmem [off] {sym} ptr x mem) 37163 for { 37164 off := v.AuxInt 37165 sym := v.Aux 37166 _ = v.Args[2] 37167 ptr := v.Args[0] 37168 v_1 := v.Args[1] 37169 if v_1.Op != OpAMD64InvertFlags { 37170 break 37171 } 37172 x := v_1.Args[0] 37173 mem := v.Args[2] 37174 v.reset(OpAMD64SETEQmem) 37175 v.AuxInt = off 37176 v.Aux = sym 37177 v.AddArg(ptr) 37178 v.AddArg(x) 37179 v.AddArg(mem) 37180 return true 37181 } 37182 // match: (SETEQmem [off] {sym} ptr x:(FlagEQ) mem) 37183 // cond: 37184 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37185 for { 37186 off := v.AuxInt 37187 sym := v.Aux 37188 _ = v.Args[2] 37189 ptr := v.Args[0] 37190 x := v.Args[1] 37191 if x.Op != OpAMD64FlagEQ { 37192 break 37193 } 37194 mem := v.Args[2] 37195 v.reset(OpAMD64MOVBstore) 37196 v.AuxInt = off 37197 v.Aux = sym 37198 v.AddArg(ptr) 37199 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37200 v0.AuxInt = 1 37201 v.AddArg(v0) 37202 v.AddArg(mem) 37203 return true 37204 } 37205 return false 37206 } 37207 func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool { 37208 b := v.Block 37209 _ = b 37210 // match: (SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem) 37211 // cond: 37212 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37213 for { 37214 off := v.AuxInt 37215 sym := v.Aux 37216 _ = v.Args[2] 37217 ptr := v.Args[0] 37218 x := v.Args[1] 37219 if x.Op != OpAMD64FlagLT_ULT { 37220 break 37221 } 37222 mem := v.Args[2] 37223 v.reset(OpAMD64MOVBstore) 37224 v.AuxInt = off 37225 v.Aux = sym 37226 v.AddArg(ptr) 37227 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37228 v0.AuxInt = 0 37229 v.AddArg(v0) 37230 v.AddArg(mem) 37231 return true 37232 } 37233 // match: (SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem) 37234 // cond: 37235 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37236 for { 37237 off := v.AuxInt 37238 sym := v.Aux 37239 _ = v.Args[2] 37240 ptr := v.Args[0] 37241 x := v.Args[1] 37242 if x.Op != OpAMD64FlagLT_UGT { 37243 break 37244 } 37245 mem := v.Args[2] 37246 v.reset(OpAMD64MOVBstore) 37247 v.AuxInt = off 37248 v.Aux = sym 37249 v.AddArg(ptr) 37250 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37251 v0.AuxInt = 0 37252 v.AddArg(v0) 37253 v.AddArg(mem) 37254 return true 37255 } 37256 // match: (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem) 37257 // cond: 37258 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37259 for { 37260 off := v.AuxInt 37261 sym := v.Aux 37262 _ = v.Args[2] 37263 ptr := v.Args[0] 37264 x := v.Args[1] 37265 if x.Op != OpAMD64FlagGT_ULT { 37266 break 37267 } 37268 mem := v.Args[2] 37269 v.reset(OpAMD64MOVBstore) 37270 v.AuxInt = off 37271 v.Aux = sym 37272 v.AddArg(ptr) 37273 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37274 v0.AuxInt = 0 37275 v.AddArg(v0) 37276 v.AddArg(mem) 37277 return true 37278 } 37279 // match: (SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem) 37280 // cond: 37281 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37282 for { 37283 off := v.AuxInt 37284 sym := v.Aux 37285 _ = v.Args[2] 37286 ptr := v.Args[0] 
37287 x := v.Args[1] 37288 if x.Op != OpAMD64FlagGT_UGT { 37289 break 37290 } 37291 mem := v.Args[2] 37292 v.reset(OpAMD64MOVBstore) 37293 v.AuxInt = off 37294 v.Aux = sym 37295 v.AddArg(ptr) 37296 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37297 v0.AuxInt = 0 37298 v.AddArg(v0) 37299 v.AddArg(mem) 37300 return true 37301 } 37302 return false 37303 } 37304 func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { 37305 // match: (SETG (InvertFlags x)) 37306 // cond: 37307 // result: (SETL x) 37308 for { 37309 v_0 := v.Args[0] 37310 if v_0.Op != OpAMD64InvertFlags { 37311 break 37312 } 37313 x := v_0.Args[0] 37314 v.reset(OpAMD64SETL) 37315 v.AddArg(x) 37316 return true 37317 } 37318 // match: (SETG (FlagEQ)) 37319 // cond: 37320 // result: (MOVLconst [0]) 37321 for { 37322 v_0 := v.Args[0] 37323 if v_0.Op != OpAMD64FlagEQ { 37324 break 37325 } 37326 v.reset(OpAMD64MOVLconst) 37327 v.AuxInt = 0 37328 return true 37329 } 37330 // match: (SETG (FlagLT_ULT)) 37331 // cond: 37332 // result: (MOVLconst [0]) 37333 for { 37334 v_0 := v.Args[0] 37335 if v_0.Op != OpAMD64FlagLT_ULT { 37336 break 37337 } 37338 v.reset(OpAMD64MOVLconst) 37339 v.AuxInt = 0 37340 return true 37341 } 37342 // match: (SETG (FlagLT_UGT)) 37343 // cond: 37344 // result: (MOVLconst [0]) 37345 for { 37346 v_0 := v.Args[0] 37347 if v_0.Op != OpAMD64FlagLT_UGT { 37348 break 37349 } 37350 v.reset(OpAMD64MOVLconst) 37351 v.AuxInt = 0 37352 return true 37353 } 37354 // match: (SETG (FlagGT_ULT)) 37355 // cond: 37356 // result: (MOVLconst [1]) 37357 for { 37358 v_0 := v.Args[0] 37359 if v_0.Op != OpAMD64FlagGT_ULT { 37360 break 37361 } 37362 v.reset(OpAMD64MOVLconst) 37363 v.AuxInt = 1 37364 return true 37365 } 37366 // match: (SETG (FlagGT_UGT)) 37367 // cond: 37368 // result: (MOVLconst [1]) 37369 for { 37370 v_0 := v.Args[0] 37371 if v_0.Op != OpAMD64FlagGT_UGT { 37372 break 37373 } 37374 v.reset(OpAMD64MOVLconst) 37375 v.AuxInt = 1 37376 return true 37377 } 37378 return false 37379 } 37380 func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { 37381 // match: (SETGE (InvertFlags x)) 37382 // cond: 37383 // result: (SETLE x) 37384 for { 37385 v_0 := v.Args[0] 37386 if v_0.Op != OpAMD64InvertFlags { 37387 break 37388 } 37389 x := v_0.Args[0] 37390 v.reset(OpAMD64SETLE) 37391 v.AddArg(x) 37392 return true 37393 } 37394 // match: (SETGE (FlagEQ)) 37395 // cond: 37396 // result: (MOVLconst [1]) 37397 for { 37398 v_0 := v.Args[0] 37399 if v_0.Op != OpAMD64FlagEQ { 37400 break 37401 } 37402 v.reset(OpAMD64MOVLconst) 37403 v.AuxInt = 1 37404 return true 37405 } 37406 // match: (SETGE (FlagLT_ULT)) 37407 // cond: 37408 // result: (MOVLconst [0]) 37409 for { 37410 v_0 := v.Args[0] 37411 if v_0.Op != OpAMD64FlagLT_ULT { 37412 break 37413 } 37414 v.reset(OpAMD64MOVLconst) 37415 v.AuxInt = 0 37416 return true 37417 } 37418 // match: (SETGE (FlagLT_UGT)) 37419 // cond: 37420 // result: (MOVLconst [0]) 37421 for { 37422 v_0 := v.Args[0] 37423 if v_0.Op != OpAMD64FlagLT_UGT { 37424 break 37425 } 37426 v.reset(OpAMD64MOVLconst) 37427 v.AuxInt = 0 37428 return true 37429 } 37430 // match: (SETGE (FlagGT_ULT)) 37431 // cond: 37432 // result: (MOVLconst [1]) 37433 for { 37434 v_0 := v.Args[0] 37435 if v_0.Op != OpAMD64FlagGT_ULT { 37436 break 37437 } 37438 v.reset(OpAMD64MOVLconst) 37439 v.AuxInt = 1 37440 return true 37441 } 37442 // match: (SETGE (FlagGT_UGT)) 37443 // cond: 37444 // result: (MOVLconst [1]) 37445 for { 37446 v_0 := v.Args[0] 37447 if v_0.Op != OpAMD64FlagGT_UGT { 37448 break 37449 } 37450 v.reset(OpAMD64MOVLconst) 37451 
v.AuxInt = 1 37452 return true 37453 } 37454 return false 37455 } 37456 func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool { 37457 b := v.Block 37458 _ = b 37459 // match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem) 37460 // cond: 37461 // result: (SETLEmem [off] {sym} ptr x mem) 37462 for { 37463 off := v.AuxInt 37464 sym := v.Aux 37465 _ = v.Args[2] 37466 ptr := v.Args[0] 37467 v_1 := v.Args[1] 37468 if v_1.Op != OpAMD64InvertFlags { 37469 break 37470 } 37471 x := v_1.Args[0] 37472 mem := v.Args[2] 37473 v.reset(OpAMD64SETLEmem) 37474 v.AuxInt = off 37475 v.Aux = sym 37476 v.AddArg(ptr) 37477 v.AddArg(x) 37478 v.AddArg(mem) 37479 return true 37480 } 37481 // match: (SETGEmem [off] {sym} ptr x:(FlagEQ) mem) 37482 // cond: 37483 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37484 for { 37485 off := v.AuxInt 37486 sym := v.Aux 37487 _ = v.Args[2] 37488 ptr := v.Args[0] 37489 x := v.Args[1] 37490 if x.Op != OpAMD64FlagEQ { 37491 break 37492 } 37493 mem := v.Args[2] 37494 v.reset(OpAMD64MOVBstore) 37495 v.AuxInt = off 37496 v.Aux = sym 37497 v.AddArg(ptr) 37498 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37499 v0.AuxInt = 1 37500 v.AddArg(v0) 37501 v.AddArg(mem) 37502 return true 37503 } 37504 // match: (SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem) 37505 // cond: 37506 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37507 for { 37508 off := v.AuxInt 37509 sym := v.Aux 37510 _ = v.Args[2] 37511 ptr := v.Args[0] 37512 x := v.Args[1] 37513 if x.Op != OpAMD64FlagLT_ULT { 37514 break 37515 } 37516 mem := v.Args[2] 37517 v.reset(OpAMD64MOVBstore) 37518 v.AuxInt = off 37519 v.Aux = sym 37520 v.AddArg(ptr) 37521 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37522 v0.AuxInt = 0 37523 v.AddArg(v0) 37524 v.AddArg(mem) 37525 return true 37526 } 37527 // match: (SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem) 37528 // cond: 37529 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37530 for { 37531 off := v.AuxInt 37532 sym := v.Aux 37533 _ = v.Args[2] 37534 ptr := v.Args[0] 37535 x := v.Args[1] 37536 if x.Op != OpAMD64FlagLT_UGT { 37537 break 37538 } 37539 mem := v.Args[2] 37540 v.reset(OpAMD64MOVBstore) 37541 v.AuxInt = off 37542 v.Aux = sym 37543 v.AddArg(ptr) 37544 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37545 v0.AuxInt = 0 37546 v.AddArg(v0) 37547 v.AddArg(mem) 37548 return true 37549 } 37550 // match: (SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem) 37551 // cond: 37552 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37553 for { 37554 off := v.AuxInt 37555 sym := v.Aux 37556 _ = v.Args[2] 37557 ptr := v.Args[0] 37558 x := v.Args[1] 37559 if x.Op != OpAMD64FlagGT_ULT { 37560 break 37561 } 37562 mem := v.Args[2] 37563 v.reset(OpAMD64MOVBstore) 37564 v.AuxInt = off 37565 v.Aux = sym 37566 v.AddArg(ptr) 37567 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37568 v0.AuxInt = 1 37569 v.AddArg(v0) 37570 v.AddArg(mem) 37571 return true 37572 } 37573 // match: (SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem) 37574 // cond: 37575 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37576 for { 37577 off := v.AuxInt 37578 sym := v.Aux 37579 _ = v.Args[2] 37580 ptr := v.Args[0] 37581 x := v.Args[1] 37582 if x.Op != OpAMD64FlagGT_UGT { 37583 break 37584 } 37585 mem := v.Args[2] 37586 v.reset(OpAMD64MOVBstore) 37587 v.AuxInt = off 37588 v.Aux = sym 37589 v.AddArg(ptr) 37590 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37591 v0.AuxInt = 1 37592 v.AddArg(v0) 37593 v.AddArg(mem) 37594 return 
true 37595 } 37596 return false 37597 } 37598 func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool { 37599 b := v.Block 37600 _ = b 37601 // match: (SETGmem [off] {sym} ptr (InvertFlags x) mem) 37602 // cond: 37603 // result: (SETLmem [off] {sym} ptr x mem) 37604 for { 37605 off := v.AuxInt 37606 sym := v.Aux 37607 _ = v.Args[2] 37608 ptr := v.Args[0] 37609 v_1 := v.Args[1] 37610 if v_1.Op != OpAMD64InvertFlags { 37611 break 37612 } 37613 x := v_1.Args[0] 37614 mem := v.Args[2] 37615 v.reset(OpAMD64SETLmem) 37616 v.AuxInt = off 37617 v.Aux = sym 37618 v.AddArg(ptr) 37619 v.AddArg(x) 37620 v.AddArg(mem) 37621 return true 37622 } 37623 // match: (SETGmem [off] {sym} ptr x:(FlagEQ) mem) 37624 // cond: 37625 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37626 for { 37627 off := v.AuxInt 37628 sym := v.Aux 37629 _ = v.Args[2] 37630 ptr := v.Args[0] 37631 x := v.Args[1] 37632 if x.Op != OpAMD64FlagEQ { 37633 break 37634 } 37635 mem := v.Args[2] 37636 v.reset(OpAMD64MOVBstore) 37637 v.AuxInt = off 37638 v.Aux = sym 37639 v.AddArg(ptr) 37640 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37641 v0.AuxInt = 0 37642 v.AddArg(v0) 37643 v.AddArg(mem) 37644 return true 37645 } 37646 // match: (SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem) 37647 // cond: 37648 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37649 for { 37650 off := v.AuxInt 37651 sym := v.Aux 37652 _ = v.Args[2] 37653 ptr := v.Args[0] 37654 x := v.Args[1] 37655 if x.Op != OpAMD64FlagLT_ULT { 37656 break 37657 } 37658 mem := v.Args[2] 37659 v.reset(OpAMD64MOVBstore) 37660 v.AuxInt = off 37661 v.Aux = sym 37662 v.AddArg(ptr) 37663 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37664 v0.AuxInt = 0 37665 v.AddArg(v0) 37666 v.AddArg(mem) 37667 return true 37668 } 37669 // match: (SETGmem [off] {sym} ptr x:(FlagLT_UGT) mem) 37670 // cond: 37671 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37672 for { 37673 off := v.AuxInt 37674 sym := v.Aux 37675 _ = v.Args[2] 37676 ptr := v.Args[0] 37677 x := v.Args[1] 37678 if x.Op != OpAMD64FlagLT_UGT { 37679 break 37680 } 37681 mem := v.Args[2] 37682 v.reset(OpAMD64MOVBstore) 37683 v.AuxInt = off 37684 v.Aux = sym 37685 v.AddArg(ptr) 37686 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37687 v0.AuxInt = 0 37688 v.AddArg(v0) 37689 v.AddArg(mem) 37690 return true 37691 } 37692 // match: (SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem) 37693 // cond: 37694 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37695 for { 37696 off := v.AuxInt 37697 sym := v.Aux 37698 _ = v.Args[2] 37699 ptr := v.Args[0] 37700 x := v.Args[1] 37701 if x.Op != OpAMD64FlagGT_ULT { 37702 break 37703 } 37704 mem := v.Args[2] 37705 v.reset(OpAMD64MOVBstore) 37706 v.AuxInt = off 37707 v.Aux = sym 37708 v.AddArg(ptr) 37709 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37710 v0.AuxInt = 1 37711 v.AddArg(v0) 37712 v.AddArg(mem) 37713 return true 37714 } 37715 // match: (SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem) 37716 // cond: 37717 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37718 for { 37719 off := v.AuxInt 37720 sym := v.Aux 37721 _ = v.Args[2] 37722 ptr := v.Args[0] 37723 x := v.Args[1] 37724 if x.Op != OpAMD64FlagGT_UGT { 37725 break 37726 } 37727 mem := v.Args[2] 37728 v.reset(OpAMD64MOVBstore) 37729 v.AuxInt = off 37730 v.Aux = sym 37731 v.AddArg(ptr) 37732 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37733 v0.AuxInt = 1 37734 v.AddArg(v0) 37735 v.AddArg(mem) 37736 return true 37737 } 37738 return false 37739 
} 37740 func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { 37741 // match: (SETL (InvertFlags x)) 37742 // cond: 37743 // result: (SETG x) 37744 for { 37745 v_0 := v.Args[0] 37746 if v_0.Op != OpAMD64InvertFlags { 37747 break 37748 } 37749 x := v_0.Args[0] 37750 v.reset(OpAMD64SETG) 37751 v.AddArg(x) 37752 return true 37753 } 37754 // match: (SETL (FlagEQ)) 37755 // cond: 37756 // result: (MOVLconst [0]) 37757 for { 37758 v_0 := v.Args[0] 37759 if v_0.Op != OpAMD64FlagEQ { 37760 break 37761 } 37762 v.reset(OpAMD64MOVLconst) 37763 v.AuxInt = 0 37764 return true 37765 } 37766 // match: (SETL (FlagLT_ULT)) 37767 // cond: 37768 // result: (MOVLconst [1]) 37769 for { 37770 v_0 := v.Args[0] 37771 if v_0.Op != OpAMD64FlagLT_ULT { 37772 break 37773 } 37774 v.reset(OpAMD64MOVLconst) 37775 v.AuxInt = 1 37776 return true 37777 } 37778 // match: (SETL (FlagLT_UGT)) 37779 // cond: 37780 // result: (MOVLconst [1]) 37781 for { 37782 v_0 := v.Args[0] 37783 if v_0.Op != OpAMD64FlagLT_UGT { 37784 break 37785 } 37786 v.reset(OpAMD64MOVLconst) 37787 v.AuxInt = 1 37788 return true 37789 } 37790 // match: (SETL (FlagGT_ULT)) 37791 // cond: 37792 // result: (MOVLconst [0]) 37793 for { 37794 v_0 := v.Args[0] 37795 if v_0.Op != OpAMD64FlagGT_ULT { 37796 break 37797 } 37798 v.reset(OpAMD64MOVLconst) 37799 v.AuxInt = 0 37800 return true 37801 } 37802 // match: (SETL (FlagGT_UGT)) 37803 // cond: 37804 // result: (MOVLconst [0]) 37805 for { 37806 v_0 := v.Args[0] 37807 if v_0.Op != OpAMD64FlagGT_UGT { 37808 break 37809 } 37810 v.reset(OpAMD64MOVLconst) 37811 v.AuxInt = 0 37812 return true 37813 } 37814 return false 37815 } 37816 func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { 37817 // match: (SETLE (InvertFlags x)) 37818 // cond: 37819 // result: (SETGE x) 37820 for { 37821 v_0 := v.Args[0] 37822 if v_0.Op != OpAMD64InvertFlags { 37823 break 37824 } 37825 x := v_0.Args[0] 37826 v.reset(OpAMD64SETGE) 37827 v.AddArg(x) 37828 return true 37829 } 37830 // match: (SETLE (FlagEQ)) 37831 // cond: 37832 // result: (MOVLconst [1]) 37833 for { 37834 v_0 := v.Args[0] 37835 if v_0.Op != OpAMD64FlagEQ { 37836 break 37837 } 37838 v.reset(OpAMD64MOVLconst) 37839 v.AuxInt = 1 37840 return true 37841 } 37842 // match: (SETLE (FlagLT_ULT)) 37843 // cond: 37844 // result: (MOVLconst [1]) 37845 for { 37846 v_0 := v.Args[0] 37847 if v_0.Op != OpAMD64FlagLT_ULT { 37848 break 37849 } 37850 v.reset(OpAMD64MOVLconst) 37851 v.AuxInt = 1 37852 return true 37853 } 37854 // match: (SETLE (FlagLT_UGT)) 37855 // cond: 37856 // result: (MOVLconst [1]) 37857 for { 37858 v_0 := v.Args[0] 37859 if v_0.Op != OpAMD64FlagLT_UGT { 37860 break 37861 } 37862 v.reset(OpAMD64MOVLconst) 37863 v.AuxInt = 1 37864 return true 37865 } 37866 // match: (SETLE (FlagGT_ULT)) 37867 // cond: 37868 // result: (MOVLconst [0]) 37869 for { 37870 v_0 := v.Args[0] 37871 if v_0.Op != OpAMD64FlagGT_ULT { 37872 break 37873 } 37874 v.reset(OpAMD64MOVLconst) 37875 v.AuxInt = 0 37876 return true 37877 } 37878 // match: (SETLE (FlagGT_UGT)) 37879 // cond: 37880 // result: (MOVLconst [0]) 37881 for { 37882 v_0 := v.Args[0] 37883 if v_0.Op != OpAMD64FlagGT_UGT { 37884 break 37885 } 37886 v.reset(OpAMD64MOVLconst) 37887 v.AuxInt = 0 37888 return true 37889 } 37890 return false 37891 } 37892 func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool { 37893 b := v.Block 37894 _ = b 37895 // match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem) 37896 // cond: 37897 // result: (SETGEmem [off] {sym} ptr x mem) 37898 for { 37899 off := v.AuxInt 37900 sym := v.Aux 37901 _ = v.Args[2] 
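// InvertFlags stands for the same flags computed with the comparison
// operands swapped, so a signed <= test of the inverted flags is a
// signed >= test of the original ones: SETLEmem(InvertFlags x) is
// rewritten to SETGEmem(x), mirroring the SETL/SETG pair above.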
37902 ptr := v.Args[0] 37903 v_1 := v.Args[1] 37904 if v_1.Op != OpAMD64InvertFlags { 37905 break 37906 } 37907 x := v_1.Args[0] 37908 mem := v.Args[2] 37909 v.reset(OpAMD64SETGEmem) 37910 v.AuxInt = off 37911 v.Aux = sym 37912 v.AddArg(ptr) 37913 v.AddArg(x) 37914 v.AddArg(mem) 37915 return true 37916 } 37917 // match: (SETLEmem [off] {sym} ptr x:(FlagEQ) mem) 37918 // cond: 37919 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37920 for { 37921 off := v.AuxInt 37922 sym := v.Aux 37923 _ = v.Args[2] 37924 ptr := v.Args[0] 37925 x := v.Args[1] 37926 if x.Op != OpAMD64FlagEQ { 37927 break 37928 } 37929 mem := v.Args[2] 37930 v.reset(OpAMD64MOVBstore) 37931 v.AuxInt = off 37932 v.Aux = sym 37933 v.AddArg(ptr) 37934 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37935 v0.AuxInt = 1 37936 v.AddArg(v0) 37937 v.AddArg(mem) 37938 return true 37939 } 37940 // match: (SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem) 37941 // cond: 37942 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37943 for { 37944 off := v.AuxInt 37945 sym := v.Aux 37946 _ = v.Args[2] 37947 ptr := v.Args[0] 37948 x := v.Args[1] 37949 if x.Op != OpAMD64FlagLT_ULT { 37950 break 37951 } 37952 mem := v.Args[2] 37953 v.reset(OpAMD64MOVBstore) 37954 v.AuxInt = off 37955 v.Aux = sym 37956 v.AddArg(ptr) 37957 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37958 v0.AuxInt = 1 37959 v.AddArg(v0) 37960 v.AddArg(mem) 37961 return true 37962 } 37963 // match: (SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem) 37964 // cond: 37965 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37966 for { 37967 off := v.AuxInt 37968 sym := v.Aux 37969 _ = v.Args[2] 37970 ptr := v.Args[0] 37971 x := v.Args[1] 37972 if x.Op != OpAMD64FlagLT_UGT { 37973 break 37974 } 37975 mem := v.Args[2] 37976 v.reset(OpAMD64MOVBstore) 37977 v.AuxInt = off 37978 v.Aux = sym 37979 v.AddArg(ptr) 37980 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37981 v0.AuxInt = 1 37982 v.AddArg(v0) 37983 v.AddArg(mem) 37984 return true 37985 } 37986 // match: (SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem) 37987 // cond: 37988 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37989 for { 37990 off := v.AuxInt 37991 sym := v.Aux 37992 _ = v.Args[2] 37993 ptr := v.Args[0] 37994 x := v.Args[1] 37995 if x.Op != OpAMD64FlagGT_ULT { 37996 break 37997 } 37998 mem := v.Args[2] 37999 v.reset(OpAMD64MOVBstore) 38000 v.AuxInt = off 38001 v.Aux = sym 38002 v.AddArg(ptr) 38003 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38004 v0.AuxInt = 0 38005 v.AddArg(v0) 38006 v.AddArg(mem) 38007 return true 38008 } 38009 // match: (SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem) 38010 // cond: 38011 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 38012 for { 38013 off := v.AuxInt 38014 sym := v.Aux 38015 _ = v.Args[2] 38016 ptr := v.Args[0] 38017 x := v.Args[1] 38018 if x.Op != OpAMD64FlagGT_UGT { 38019 break 38020 } 38021 mem := v.Args[2] 38022 v.reset(OpAMD64MOVBstore) 38023 v.AuxInt = off 38024 v.Aux = sym 38025 v.AddArg(ptr) 38026 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38027 v0.AuxInt = 0 38028 v.AddArg(v0) 38029 v.AddArg(mem) 38030 return true 38031 } 38032 return false 38033 } 38034 func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool { 38035 b := v.Block 38036 _ = b 38037 // match: (SETLmem [off] {sym} ptr (InvertFlags x) mem) 38038 // cond: 38039 // result: (SETGmem [off] {sym} ptr x mem) 38040 for { 38041 off := v.AuxInt 38042 sym := v.Aux 38043 _ = v.Args[2] 38044 ptr := v.Args[0] 38045 
v_1 := v.Args[1] 38046 if v_1.Op != OpAMD64InvertFlags { 38047 break 38048 } 38049 x := v_1.Args[0] 38050 mem := v.Args[2] 38051 v.reset(OpAMD64SETGmem) 38052 v.AuxInt = off 38053 v.Aux = sym 38054 v.AddArg(ptr) 38055 v.AddArg(x) 38056 v.AddArg(mem) 38057 return true 38058 } 38059 // match: (SETLmem [off] {sym} ptr x:(FlagEQ) mem) 38060 // cond: 38061 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 38062 for { 38063 off := v.AuxInt 38064 sym := v.Aux 38065 _ = v.Args[2] 38066 ptr := v.Args[0] 38067 x := v.Args[1] 38068 if x.Op != OpAMD64FlagEQ { 38069 break 38070 } 38071 mem := v.Args[2] 38072 v.reset(OpAMD64MOVBstore) 38073 v.AuxInt = off 38074 v.Aux = sym 38075 v.AddArg(ptr) 38076 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38077 v0.AuxInt = 0 38078 v.AddArg(v0) 38079 v.AddArg(mem) 38080 return true 38081 } 38082 // match: (SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem) 38083 // cond: 38084 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38085 for { 38086 off := v.AuxInt 38087 sym := v.Aux 38088 _ = v.Args[2] 38089 ptr := v.Args[0] 38090 x := v.Args[1] 38091 if x.Op != OpAMD64FlagLT_ULT { 38092 break 38093 } 38094 mem := v.Args[2] 38095 v.reset(OpAMD64MOVBstore) 38096 v.AuxInt = off 38097 v.Aux = sym 38098 v.AddArg(ptr) 38099 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38100 v0.AuxInt = 1 38101 v.AddArg(v0) 38102 v.AddArg(mem) 38103 return true 38104 } 38105 // match: (SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem) 38106 // cond: 38107 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38108 for { 38109 off := v.AuxInt 38110 sym := v.Aux 38111 _ = v.Args[2] 38112 ptr := v.Args[0] 38113 x := v.Args[1] 38114 if x.Op != OpAMD64FlagLT_UGT { 38115 break 38116 } 38117 mem := v.Args[2] 38118 v.reset(OpAMD64MOVBstore) 38119 v.AuxInt = off 38120 v.Aux = sym 38121 v.AddArg(ptr) 38122 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38123 v0.AuxInt = 1 38124 v.AddArg(v0) 38125 v.AddArg(mem) 38126 return true 38127 } 38128 // match: (SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem) 38129 // cond: 38130 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 38131 for { 38132 off := v.AuxInt 38133 sym := v.Aux 38134 _ = v.Args[2] 38135 ptr := v.Args[0] 38136 x := v.Args[1] 38137 if x.Op != OpAMD64FlagGT_ULT { 38138 break 38139 } 38140 mem := v.Args[2] 38141 v.reset(OpAMD64MOVBstore) 38142 v.AuxInt = off 38143 v.Aux = sym 38144 v.AddArg(ptr) 38145 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38146 v0.AuxInt = 0 38147 v.AddArg(v0) 38148 v.AddArg(mem) 38149 return true 38150 } 38151 // match: (SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem) 38152 // cond: 38153 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 38154 for { 38155 off := v.AuxInt 38156 sym := v.Aux 38157 _ = v.Args[2] 38158 ptr := v.Args[0] 38159 x := v.Args[1] 38160 if x.Op != OpAMD64FlagGT_UGT { 38161 break 38162 } 38163 mem := v.Args[2] 38164 v.reset(OpAMD64MOVBstore) 38165 v.AuxInt = off 38166 v.Aux = sym 38167 v.AddArg(ptr) 38168 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38169 v0.AuxInt = 0 38170 v.AddArg(v0) 38171 v.AddArg(mem) 38172 return true 38173 } 38174 return false 38175 } 38176 func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { 38177 b := v.Block 38178 _ = b 38179 config := b.Func.Config 38180 _ = config 38181 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 38182 // cond: !config.nacl 38183 // result: (SETB (BTL x y)) 38184 for { 38185 v_0 := v.Args[0] 38186 if v_0.Op != OpAMD64TESTL { 38187 break 38188 } 38189 _ = 
v_0.Args[1] 38190 v_0_0 := v_0.Args[0] 38191 if v_0_0.Op != OpAMD64SHLL { 38192 break 38193 } 38194 _ = v_0_0.Args[1] 38195 v_0_0_0 := v_0_0.Args[0] 38196 if v_0_0_0.Op != OpAMD64MOVLconst { 38197 break 38198 } 38199 if v_0_0_0.AuxInt != 1 { 38200 break 38201 } 38202 x := v_0_0.Args[1] 38203 y := v_0.Args[1] 38204 if !(!config.nacl) { 38205 break 38206 } 38207 v.reset(OpAMD64SETB) 38208 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 38209 v0.AddArg(x) 38210 v0.AddArg(y) 38211 v.AddArg(v0) 38212 return true 38213 } 38214 // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) 38215 // cond: !config.nacl 38216 // result: (SETB (BTL x y)) 38217 for { 38218 v_0 := v.Args[0] 38219 if v_0.Op != OpAMD64TESTL { 38220 break 38221 } 38222 _ = v_0.Args[1] 38223 y := v_0.Args[0] 38224 v_0_1 := v_0.Args[1] 38225 if v_0_1.Op != OpAMD64SHLL { 38226 break 38227 } 38228 _ = v_0_1.Args[1] 38229 v_0_1_0 := v_0_1.Args[0] 38230 if v_0_1_0.Op != OpAMD64MOVLconst { 38231 break 38232 } 38233 if v_0_1_0.AuxInt != 1 { 38234 break 38235 } 38236 x := v_0_1.Args[1] 38237 if !(!config.nacl) { 38238 break 38239 } 38240 v.reset(OpAMD64SETB) 38241 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 38242 v0.AddArg(x) 38243 v0.AddArg(y) 38244 v.AddArg(v0) 38245 return true 38246 } 38247 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 38248 // cond: !config.nacl 38249 // result: (SETB (BTQ x y)) 38250 for { 38251 v_0 := v.Args[0] 38252 if v_0.Op != OpAMD64TESTQ { 38253 break 38254 } 38255 _ = v_0.Args[1] 38256 v_0_0 := v_0.Args[0] 38257 if v_0_0.Op != OpAMD64SHLQ { 38258 break 38259 } 38260 _ = v_0_0.Args[1] 38261 v_0_0_0 := v_0_0.Args[0] 38262 if v_0_0_0.Op != OpAMD64MOVQconst { 38263 break 38264 } 38265 if v_0_0_0.AuxInt != 1 { 38266 break 38267 } 38268 x := v_0_0.Args[1] 38269 y := v_0.Args[1] 38270 if !(!config.nacl) { 38271 break 38272 } 38273 v.reset(OpAMD64SETB) 38274 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 38275 v0.AddArg(x) 38276 v0.AddArg(y) 38277 v.AddArg(v0) 38278 return true 38279 } 38280 // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) 38281 // cond: !config.nacl 38282 // result: (SETB (BTQ x y)) 38283 for { 38284 v_0 := v.Args[0] 38285 if v_0.Op != OpAMD64TESTQ { 38286 break 38287 } 38288 _ = v_0.Args[1] 38289 y := v_0.Args[0] 38290 v_0_1 := v_0.Args[1] 38291 if v_0_1.Op != OpAMD64SHLQ { 38292 break 38293 } 38294 _ = v_0_1.Args[1] 38295 v_0_1_0 := v_0_1.Args[0] 38296 if v_0_1_0.Op != OpAMD64MOVQconst { 38297 break 38298 } 38299 if v_0_1_0.AuxInt != 1 { 38300 break 38301 } 38302 x := v_0_1.Args[1] 38303 if !(!config.nacl) { 38304 break 38305 } 38306 v.reset(OpAMD64SETB) 38307 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 38308 v0.AddArg(x) 38309 v0.AddArg(y) 38310 v.AddArg(v0) 38311 return true 38312 } 38313 // match: (SETNE (TESTLconst [c] x)) 38314 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 38315 // result: (SETB (BTLconst [log2(c)] x)) 38316 for { 38317 v_0 := v.Args[0] 38318 if v_0.Op != OpAMD64TESTLconst { 38319 break 38320 } 38321 c := v_0.AuxInt 38322 x := v_0.Args[0] 38323 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 38324 break 38325 } 38326 v.reset(OpAMD64SETB) 38327 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 38328 v0.AuxInt = log2(c) 38329 v0.AddArg(x) 38330 v.AddArg(v0) 38331 return true 38332 } 38333 // match: (SETNE (TESTQconst [c] x)) 38334 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38335 // result: (SETB (BTQconst [log2(c)] x)) 38336 for { 38337 v_0 := v.Args[0] 38338 if v_0.Op != OpAMD64TESTQconst { 
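// only a TESTQconst can feed the BTQconst rewrite; anything else
// falls through to the next pattern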
38339 break 38340 } 38341 c := v_0.AuxInt 38342 x := v_0.Args[0] 38343 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38344 break 38345 } 38346 v.reset(OpAMD64SETB) 38347 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38348 v0.AuxInt = log2(c) 38349 v0.AddArg(x) 38350 v.AddArg(v0) 38351 return true 38352 } 38353 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 38354 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38355 // result: (SETB (BTQconst [log2(c)] x)) 38356 for { 38357 v_0 := v.Args[0] 38358 if v_0.Op != OpAMD64TESTQ { 38359 break 38360 } 38361 _ = v_0.Args[1] 38362 v_0_0 := v_0.Args[0] 38363 if v_0_0.Op != OpAMD64MOVQconst { 38364 break 38365 } 38366 c := v_0_0.AuxInt 38367 x := v_0.Args[1] 38368 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38369 break 38370 } 38371 v.reset(OpAMD64SETB) 38372 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38373 v0.AuxInt = log2(c) 38374 v0.AddArg(x) 38375 v.AddArg(v0) 38376 return true 38377 } 38378 // match: (SETNE (TESTQ x (MOVQconst [c]))) 38379 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38380 // result: (SETB (BTQconst [log2(c)] x)) 38381 for { 38382 v_0 := v.Args[0] 38383 if v_0.Op != OpAMD64TESTQ { 38384 break 38385 } 38386 _ = v_0.Args[1] 38387 x := v_0.Args[0] 38388 v_0_1 := v_0.Args[1] 38389 if v_0_1.Op != OpAMD64MOVQconst { 38390 break 38391 } 38392 c := v_0_1.AuxInt 38393 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38394 break 38395 } 38396 v.reset(OpAMD64SETB) 38397 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38398 v0.AuxInt = log2(c) 38399 v0.AddArg(x) 38400 v.AddArg(v0) 38401 return true 38402 } 38403 // match: (SETNE (InvertFlags x)) 38404 // cond: 38405 // result: (SETNE x) 38406 for { 38407 v_0 := v.Args[0] 38408 if v_0.Op != OpAMD64InvertFlags { 38409 break 38410 } 38411 x := v_0.Args[0] 38412 v.reset(OpAMD64SETNE) 38413 v.AddArg(x) 38414 return true 38415 } 38416 // match: (SETNE (FlagEQ)) 38417 // cond: 38418 // result: (MOVLconst [0]) 38419 for { 38420 v_0 := v.Args[0] 38421 if v_0.Op != OpAMD64FlagEQ { 38422 break 38423 } 38424 v.reset(OpAMD64MOVLconst) 38425 v.AuxInt = 0 38426 return true 38427 } 38428 return false 38429 } 38430 func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { 38431 // match: (SETNE (FlagLT_ULT)) 38432 // cond: 38433 // result: (MOVLconst [1]) 38434 for { 38435 v_0 := v.Args[0] 38436 if v_0.Op != OpAMD64FlagLT_ULT { 38437 break 38438 } 38439 v.reset(OpAMD64MOVLconst) 38440 v.AuxInt = 1 38441 return true 38442 } 38443 // match: (SETNE (FlagLT_UGT)) 38444 // cond: 38445 // result: (MOVLconst [1]) 38446 for { 38447 v_0 := v.Args[0] 38448 if v_0.Op != OpAMD64FlagLT_UGT { 38449 break 38450 } 38451 v.reset(OpAMD64MOVLconst) 38452 v.AuxInt = 1 38453 return true 38454 } 38455 // match: (SETNE (FlagGT_ULT)) 38456 // cond: 38457 // result: (MOVLconst [1]) 38458 for { 38459 v_0 := v.Args[0] 38460 if v_0.Op != OpAMD64FlagGT_ULT { 38461 break 38462 } 38463 v.reset(OpAMD64MOVLconst) 38464 v.AuxInt = 1 38465 return true 38466 } 38467 // match: (SETNE (FlagGT_UGT)) 38468 // cond: 38469 // result: (MOVLconst [1]) 38470 for { 38471 v_0 := v.Args[0] 38472 if v_0.Op != OpAMD64FlagGT_UGT { 38473 break 38474 } 38475 v.reset(OpAMD64MOVLconst) 38476 v.AuxInt = 1 38477 return true 38478 } 38479 return false 38480 } 38481 func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool { 38482 b := v.Block 38483 _ = b 38484 config := b.Func.Config 38485 _ = config 38486 // match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) 
38487 // cond: !config.nacl 38488 // result: (SETBmem [off] {sym} ptr (BTL x y) mem) 38489 for { 38490 off := v.AuxInt 38491 sym := v.Aux 38492 _ = v.Args[2] 38493 ptr := v.Args[0] 38494 v_1 := v.Args[1] 38495 if v_1.Op != OpAMD64TESTL { 38496 break 38497 } 38498 _ = v_1.Args[1] 38499 v_1_0 := v_1.Args[0] 38500 if v_1_0.Op != OpAMD64SHLL { 38501 break 38502 } 38503 _ = v_1_0.Args[1] 38504 v_1_0_0 := v_1_0.Args[0] 38505 if v_1_0_0.Op != OpAMD64MOVLconst { 38506 break 38507 } 38508 if v_1_0_0.AuxInt != 1 { 38509 break 38510 } 38511 x := v_1_0.Args[1] 38512 y := v_1.Args[1] 38513 mem := v.Args[2] 38514 if !(!config.nacl) { 38515 break 38516 } 38517 v.reset(OpAMD64SETBmem) 38518 v.AuxInt = off 38519 v.Aux = sym 38520 v.AddArg(ptr) 38521 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 38522 v0.AddArg(x) 38523 v0.AddArg(y) 38524 v.AddArg(v0) 38525 v.AddArg(mem) 38526 return true 38527 } 38528 // match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) 38529 // cond: !config.nacl 38530 // result: (SETBmem [off] {sym} ptr (BTL x y) mem) 38531 for { 38532 off := v.AuxInt 38533 sym := v.Aux 38534 _ = v.Args[2] 38535 ptr := v.Args[0] 38536 v_1 := v.Args[1] 38537 if v_1.Op != OpAMD64TESTL { 38538 break 38539 } 38540 _ = v_1.Args[1] 38541 y := v_1.Args[0] 38542 v_1_1 := v_1.Args[1] 38543 if v_1_1.Op != OpAMD64SHLL { 38544 break 38545 } 38546 _ = v_1_1.Args[1] 38547 v_1_1_0 := v_1_1.Args[0] 38548 if v_1_1_0.Op != OpAMD64MOVLconst { 38549 break 38550 } 38551 if v_1_1_0.AuxInt != 1 { 38552 break 38553 } 38554 x := v_1_1.Args[1] 38555 mem := v.Args[2] 38556 if !(!config.nacl) { 38557 break 38558 } 38559 v.reset(OpAMD64SETBmem) 38560 v.AuxInt = off 38561 v.Aux = sym 38562 v.AddArg(ptr) 38563 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 38564 v0.AddArg(x) 38565 v0.AddArg(y) 38566 v.AddArg(v0) 38567 v.AddArg(mem) 38568 return true 38569 } 38570 // match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) 38571 // cond: !config.nacl 38572 // result: (SETBmem [off] {sym} ptr (BTQ x y) mem) 38573 for { 38574 off := v.AuxInt 38575 sym := v.Aux 38576 _ = v.Args[2] 38577 ptr := v.Args[0] 38578 v_1 := v.Args[1] 38579 if v_1.Op != OpAMD64TESTQ { 38580 break 38581 } 38582 _ = v_1.Args[1] 38583 v_1_0 := v_1.Args[0] 38584 if v_1_0.Op != OpAMD64SHLQ { 38585 break 38586 } 38587 _ = v_1_0.Args[1] 38588 v_1_0_0 := v_1_0.Args[0] 38589 if v_1_0_0.Op != OpAMD64MOVQconst { 38590 break 38591 } 38592 if v_1_0_0.AuxInt != 1 { 38593 break 38594 } 38595 x := v_1_0.Args[1] 38596 y := v_1.Args[1] 38597 mem := v.Args[2] 38598 if !(!config.nacl) { 38599 break 38600 } 38601 v.reset(OpAMD64SETBmem) 38602 v.AuxInt = off 38603 v.Aux = sym 38604 v.AddArg(ptr) 38605 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 38606 v0.AddArg(x) 38607 v0.AddArg(y) 38608 v.AddArg(v0) 38609 v.AddArg(mem) 38610 return true 38611 } 38612 // match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) 38613 // cond: !config.nacl 38614 // result: (SETBmem [off] {sym} ptr (BTQ x y) mem) 38615 for { 38616 off := v.AuxInt 38617 sym := v.Aux 38618 _ = v.Args[2] 38619 ptr := v.Args[0] 38620 v_1 := v.Args[1] 38621 if v_1.Op != OpAMD64TESTQ { 38622 break 38623 } 38624 _ = v_1.Args[1] 38625 y := v_1.Args[0] 38626 v_1_1 := v_1.Args[1] 38627 if v_1_1.Op != OpAMD64SHLQ { 38628 break 38629 } 38630 _ = v_1_1.Args[1] 38631 v_1_1_0 := v_1_1.Args[0] 38632 if v_1_1_0.Op != OpAMD64MOVQconst { 38633 break 38634 } 38635 if v_1_1_0.AuxInt != 1 { 38636 break 38637 } 38638 x := v_1_1.Args[1] 38639 mem := v.Args[2] 38640 
if !(!config.nacl) { 38641 break 38642 } 38643 v.reset(OpAMD64SETBmem) 38644 v.AuxInt = off 38645 v.Aux = sym 38646 v.AddArg(ptr) 38647 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 38648 v0.AddArg(x) 38649 v0.AddArg(y) 38650 v.AddArg(v0) 38651 v.AddArg(mem) 38652 return true 38653 } 38654 // match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem) 38655 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 38656 // result: (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) 38657 for { 38658 off := v.AuxInt 38659 sym := v.Aux 38660 _ = v.Args[2] 38661 ptr := v.Args[0] 38662 v_1 := v.Args[1] 38663 if v_1.Op != OpAMD64TESTLconst { 38664 break 38665 } 38666 c := v_1.AuxInt 38667 x := v_1.Args[0] 38668 mem := v.Args[2] 38669 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 38670 break 38671 } 38672 v.reset(OpAMD64SETBmem) 38673 v.AuxInt = off 38674 v.Aux = sym 38675 v.AddArg(ptr) 38676 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 38677 v0.AuxInt = log2(c) 38678 v0.AddArg(x) 38679 v.AddArg(v0) 38680 v.AddArg(mem) 38681 return true 38682 } 38683 // match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem) 38684 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38685 // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) 38686 for { 38687 off := v.AuxInt 38688 sym := v.Aux 38689 _ = v.Args[2] 38690 ptr := v.Args[0] 38691 v_1 := v.Args[1] 38692 if v_1.Op != OpAMD64TESTQconst { 38693 break 38694 } 38695 c := v_1.AuxInt 38696 x := v_1.Args[0] 38697 mem := v.Args[2] 38698 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38699 break 38700 } 38701 v.reset(OpAMD64SETBmem) 38702 v.AuxInt = off 38703 v.Aux = sym 38704 v.AddArg(ptr) 38705 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38706 v0.AuxInt = log2(c) 38707 v0.AddArg(x) 38708 v.AddArg(v0) 38709 v.AddArg(mem) 38710 return true 38711 } 38712 // match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) 38713 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38714 // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) 38715 for { 38716 off := v.AuxInt 38717 sym := v.Aux 38718 _ = v.Args[2] 38719 ptr := v.Args[0] 38720 v_1 := v.Args[1] 38721 if v_1.Op != OpAMD64TESTQ { 38722 break 38723 } 38724 _ = v_1.Args[1] 38725 v_1_0 := v_1.Args[0] 38726 if v_1_0.Op != OpAMD64MOVQconst { 38727 break 38728 } 38729 c := v_1_0.AuxInt 38730 x := v_1.Args[1] 38731 mem := v.Args[2] 38732 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38733 break 38734 } 38735 v.reset(OpAMD64SETBmem) 38736 v.AuxInt = off 38737 v.Aux = sym 38738 v.AddArg(ptr) 38739 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38740 v0.AuxInt = log2(c) 38741 v0.AddArg(x) 38742 v.AddArg(v0) 38743 v.AddArg(mem) 38744 return true 38745 } 38746 // match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) 38747 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38748 // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) 38749 for { 38750 off := v.AuxInt 38751 sym := v.Aux 38752 _ = v.Args[2] 38753 ptr := v.Args[0] 38754 v_1 := v.Args[1] 38755 if v_1.Op != OpAMD64TESTQ { 38756 break 38757 } 38758 _ = v_1.Args[1] 38759 x := v_1.Args[0] 38760 v_1_1 := v_1.Args[1] 38761 if v_1_1.Op != OpAMD64MOVQconst { 38762 break 38763 } 38764 c := v_1_1.AuxInt 38765 mem := v.Args[2] 38766 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38767 break 38768 } 38769 v.reset(OpAMD64SETBmem) 38770 v.AuxInt = off 38771 v.Aux = sym 38772 v.AddArg(ptr) 38773 v0 := b.NewValue0(v.Pos, 
OpAMD64BTQconst, types.TypeFlags) 38774 v0.AuxInt = log2(c) 38775 v0.AddArg(x) 38776 v.AddArg(v0) 38777 v.AddArg(mem) 38778 return true 38779 } 38780 // match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem) 38781 // cond: 38782 // result: (SETNEmem [off] {sym} ptr x mem) 38783 for { 38784 off := v.AuxInt 38785 sym := v.Aux 38786 _ = v.Args[2] 38787 ptr := v.Args[0] 38788 v_1 := v.Args[1] 38789 if v_1.Op != OpAMD64InvertFlags { 38790 break 38791 } 38792 x := v_1.Args[0] 38793 mem := v.Args[2] 38794 v.reset(OpAMD64SETNEmem) 38795 v.AuxInt = off 38796 v.Aux = sym 38797 v.AddArg(ptr) 38798 v.AddArg(x) 38799 v.AddArg(mem) 38800 return true 38801 } 38802 // match: (SETNEmem [off] {sym} ptr x:(FlagEQ) mem) 38803 // cond: 38804 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 38805 for { 38806 off := v.AuxInt 38807 sym := v.Aux 38808 _ = v.Args[2] 38809 ptr := v.Args[0] 38810 x := v.Args[1] 38811 if x.Op != OpAMD64FlagEQ { 38812 break 38813 } 38814 mem := v.Args[2] 38815 v.reset(OpAMD64MOVBstore) 38816 v.AuxInt = off 38817 v.Aux = sym 38818 v.AddArg(ptr) 38819 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38820 v0.AuxInt = 0 38821 v.AddArg(v0) 38822 v.AddArg(mem) 38823 return true 38824 } 38825 return false 38826 } 38827 func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool { 38828 b := v.Block 38829 _ = b 38830 // match: (SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem) 38831 // cond: 38832 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38833 for { 38834 off := v.AuxInt 38835 sym := v.Aux 38836 _ = v.Args[2] 38837 ptr := v.Args[0] 38838 x := v.Args[1] 38839 if x.Op != OpAMD64FlagLT_ULT { 38840 break 38841 } 38842 mem := v.Args[2] 38843 v.reset(OpAMD64MOVBstore) 38844 v.AuxInt = off 38845 v.Aux = sym 38846 v.AddArg(ptr) 38847 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38848 v0.AuxInt = 1 38849 v.AddArg(v0) 38850 v.AddArg(mem) 38851 return true 38852 } 38853 // match: (SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem) 38854 // cond: 38855 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38856 for { 38857 off := v.AuxInt 38858 sym := v.Aux 38859 _ = v.Args[2] 38860 ptr := v.Args[0] 38861 x := v.Args[1] 38862 if x.Op != OpAMD64FlagLT_UGT { 38863 break 38864 } 38865 mem := v.Args[2] 38866 v.reset(OpAMD64MOVBstore) 38867 v.AuxInt = off 38868 v.Aux = sym 38869 v.AddArg(ptr) 38870 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38871 v0.AuxInt = 1 38872 v.AddArg(v0) 38873 v.AddArg(mem) 38874 return true 38875 } 38876 // match: (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem) 38877 // cond: 38878 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38879 for { 38880 off := v.AuxInt 38881 sym := v.Aux 38882 _ = v.Args[2] 38883 ptr := v.Args[0] 38884 x := v.Args[1] 38885 if x.Op != OpAMD64FlagGT_ULT { 38886 break 38887 } 38888 mem := v.Args[2] 38889 v.reset(OpAMD64MOVBstore) 38890 v.AuxInt = off 38891 v.Aux = sym 38892 v.AddArg(ptr) 38893 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38894 v0.AuxInt = 1 38895 v.AddArg(v0) 38896 v.AddArg(mem) 38897 return true 38898 } 38899 // match: (SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem) 38900 // cond: 38901 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38902 for { 38903 off := v.AuxInt 38904 sym := v.Aux 38905 _ = v.Args[2] 38906 ptr := v.Args[0] 38907 x := v.Args[1] 38908 if x.Op != OpAMD64FlagGT_UGT { 38909 break 38910 } 38911 mem := v.Args[2] 38912 v.reset(OpAMD64MOVBstore) 38913 v.AuxInt = off 38914 v.Aux = sym 38915 v.AddArg(ptr) 38916 v0 := 
b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38917 v0.AuxInt = 1 38918 v.AddArg(v0) 38919 v.AddArg(mem) 38920 return true 38921 } 38922 return false 38923 } 38924 func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { 38925 b := v.Block 38926 _ = b 38927 // match: (SHLL x (MOVQconst [c])) 38928 // cond: 38929 // result: (SHLLconst [c&31] x) 38930 for { 38931 _ = v.Args[1] 38932 x := v.Args[0] 38933 v_1 := v.Args[1] 38934 if v_1.Op != OpAMD64MOVQconst { 38935 break 38936 } 38937 c := v_1.AuxInt 38938 v.reset(OpAMD64SHLLconst) 38939 v.AuxInt = c & 31 38940 v.AddArg(x) 38941 return true 38942 } 38943 // match: (SHLL x (MOVLconst [c])) 38944 // cond: 38945 // result: (SHLLconst [c&31] x) 38946 for { 38947 _ = v.Args[1] 38948 x := v.Args[0] 38949 v_1 := v.Args[1] 38950 if v_1.Op != OpAMD64MOVLconst { 38951 break 38952 } 38953 c := v_1.AuxInt 38954 v.reset(OpAMD64SHLLconst) 38955 v.AuxInt = c & 31 38956 v.AddArg(x) 38957 return true 38958 } 38959 // match: (SHLL x (ADDQconst [c] y)) 38960 // cond: c & 31 == 0 38961 // result: (SHLL x y) 38962 for { 38963 _ = v.Args[1] 38964 x := v.Args[0] 38965 v_1 := v.Args[1] 38966 if v_1.Op != OpAMD64ADDQconst { 38967 break 38968 } 38969 c := v_1.AuxInt 38970 y := v_1.Args[0] 38971 if !(c&31 == 0) { 38972 break 38973 } 38974 v.reset(OpAMD64SHLL) 38975 v.AddArg(x) 38976 v.AddArg(y) 38977 return true 38978 } 38979 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y))) 38980 // cond: c & 31 == 0 38981 // result: (SHLL x (NEGQ <t> y)) 38982 for { 38983 _ = v.Args[1] 38984 x := v.Args[0] 38985 v_1 := v.Args[1] 38986 if v_1.Op != OpAMD64NEGQ { 38987 break 38988 } 38989 t := v_1.Type 38990 v_1_0 := v_1.Args[0] 38991 if v_1_0.Op != OpAMD64ADDQconst { 38992 break 38993 } 38994 c := v_1_0.AuxInt 38995 y := v_1_0.Args[0] 38996 if !(c&31 == 0) { 38997 break 38998 } 38999 v.reset(OpAMD64SHLL) 39000 v.AddArg(x) 39001 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39002 v0.AddArg(y) 39003 v.AddArg(v0) 39004 return true 39005 } 39006 // match: (SHLL x (ANDQconst [c] y)) 39007 // cond: c & 31 == 31 39008 // result: (SHLL x y) 39009 for { 39010 _ = v.Args[1] 39011 x := v.Args[0] 39012 v_1 := v.Args[1] 39013 if v_1.Op != OpAMD64ANDQconst { 39014 break 39015 } 39016 c := v_1.AuxInt 39017 y := v_1.Args[0] 39018 if !(c&31 == 31) { 39019 break 39020 } 39021 v.reset(OpAMD64SHLL) 39022 v.AddArg(x) 39023 v.AddArg(y) 39024 return true 39025 } 39026 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y))) 39027 // cond: c & 31 == 31 39028 // result: (SHLL x (NEGQ <t> y)) 39029 for { 39030 _ = v.Args[1] 39031 x := v.Args[0] 39032 v_1 := v.Args[1] 39033 if v_1.Op != OpAMD64NEGQ { 39034 break 39035 } 39036 t := v_1.Type 39037 v_1_0 := v_1.Args[0] 39038 if v_1_0.Op != OpAMD64ANDQconst { 39039 break 39040 } 39041 c := v_1_0.AuxInt 39042 y := v_1_0.Args[0] 39043 if !(c&31 == 31) { 39044 break 39045 } 39046 v.reset(OpAMD64SHLL) 39047 v.AddArg(x) 39048 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39049 v0.AddArg(y) 39050 v.AddArg(v0) 39051 return true 39052 } 39053 // match: (SHLL x (ADDLconst [c] y)) 39054 // cond: c & 31 == 0 39055 // result: (SHLL x y) 39056 for { 39057 _ = v.Args[1] 39058 x := v.Args[0] 39059 v_1 := v.Args[1] 39060 if v_1.Op != OpAMD64ADDLconst { 39061 break 39062 } 39063 c := v_1.AuxInt 39064 y := v_1.Args[0] 39065 if !(c&31 == 0) { 39066 break 39067 } 39068 v.reset(OpAMD64SHLL) 39069 v.AddArg(x) 39070 v.AddArg(y) 39071 return true 39072 } 39073 // match: (SHLL x (NEGL <t> (ADDLconst [c] y))) 39074 // cond: c & 31 == 0 39075 // result: (SHLL x (NEGL <t> y)) 39076 for { 39077 _ = v.Args[1] 39078 x := 
v.Args[0] 39079 v_1 := v.Args[1] 39080 if v_1.Op != OpAMD64NEGL { 39081 break 39082 } 39083 t := v_1.Type 39084 v_1_0 := v_1.Args[0] 39085 if v_1_0.Op != OpAMD64ADDLconst { 39086 break 39087 } 39088 c := v_1_0.AuxInt 39089 y := v_1_0.Args[0] 39090 if !(c&31 == 0) { 39091 break 39092 } 39093 v.reset(OpAMD64SHLL) 39094 v.AddArg(x) 39095 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39096 v0.AddArg(y) 39097 v.AddArg(v0) 39098 return true 39099 } 39100 // match: (SHLL x (ANDLconst [c] y)) 39101 // cond: c & 31 == 31 39102 // result: (SHLL x y) 39103 for { 39104 _ = v.Args[1] 39105 x := v.Args[0] 39106 v_1 := v.Args[1] 39107 if v_1.Op != OpAMD64ANDLconst { 39108 break 39109 } 39110 c := v_1.AuxInt 39111 y := v_1.Args[0] 39112 if !(c&31 == 31) { 39113 break 39114 } 39115 v.reset(OpAMD64SHLL) 39116 v.AddArg(x) 39117 v.AddArg(y) 39118 return true 39119 } 39120 // match: (SHLL x (NEGL <t> (ANDLconst [c] y))) 39121 // cond: c & 31 == 31 39122 // result: (SHLL x (NEGL <t> y)) 39123 for { 39124 _ = v.Args[1] 39125 x := v.Args[0] 39126 v_1 := v.Args[1] 39127 if v_1.Op != OpAMD64NEGL { 39128 break 39129 } 39130 t := v_1.Type 39131 v_1_0 := v_1.Args[0] 39132 if v_1_0.Op != OpAMD64ANDLconst { 39133 break 39134 } 39135 c := v_1_0.AuxInt 39136 y := v_1_0.Args[0] 39137 if !(c&31 == 31) { 39138 break 39139 } 39140 v.reset(OpAMD64SHLL) 39141 v.AddArg(x) 39142 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39143 v0.AddArg(y) 39144 v.AddArg(v0) 39145 return true 39146 } 39147 return false 39148 } 39149 func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { 39150 // match: (SHLLconst x [0]) 39151 // cond: 39152 // result: x 39153 for { 39154 if v.AuxInt != 0 { 39155 break 39156 } 39157 x := v.Args[0] 39158 v.reset(OpCopy) 39159 v.Type = x.Type 39160 v.AddArg(x) 39161 return true 39162 } 39163 return false 39164 } 39165 func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { 39166 b := v.Block 39167 _ = b 39168 // match: (SHLQ x (MOVQconst [c])) 39169 // cond: 39170 // result: (SHLQconst [c&63] x) 39171 for { 39172 _ = v.Args[1] 39173 x := v.Args[0] 39174 v_1 := v.Args[1] 39175 if v_1.Op != OpAMD64MOVQconst { 39176 break 39177 } 39178 c := v_1.AuxInt 39179 v.reset(OpAMD64SHLQconst) 39180 v.AuxInt = c & 63 39181 v.AddArg(x) 39182 return true 39183 } 39184 // match: (SHLQ x (MOVLconst [c])) 39185 // cond: 39186 // result: (SHLQconst [c&63] x) 39187 for { 39188 _ = v.Args[1] 39189 x := v.Args[0] 39190 v_1 := v.Args[1] 39191 if v_1.Op != OpAMD64MOVLconst { 39192 break 39193 } 39194 c := v_1.AuxInt 39195 v.reset(OpAMD64SHLQconst) 39196 v.AuxInt = c & 63 39197 v.AddArg(x) 39198 return true 39199 } 39200 // match: (SHLQ x (ADDQconst [c] y)) 39201 // cond: c & 63 == 0 39202 // result: (SHLQ x y) 39203 for { 39204 _ = v.Args[1] 39205 x := v.Args[0] 39206 v_1 := v.Args[1] 39207 if v_1.Op != OpAMD64ADDQconst { 39208 break 39209 } 39210 c := v_1.AuxInt 39211 y := v_1.Args[0] 39212 if !(c&63 == 0) { 39213 break 39214 } 39215 v.reset(OpAMD64SHLQ) 39216 v.AddArg(x) 39217 v.AddArg(y) 39218 return true 39219 } 39220 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y))) 39221 // cond: c & 63 == 0 39222 // result: (SHLQ x (NEGQ <t> y)) 39223 for { 39224 _ = v.Args[1] 39225 x := v.Args[0] 39226 v_1 := v.Args[1] 39227 if v_1.Op != OpAMD64NEGQ { 39228 break 39229 } 39230 t := v_1.Type 39231 v_1_0 := v_1.Args[0] 39232 if v_1_0.Op != OpAMD64ADDQconst { 39233 break 39234 } 39235 c := v_1_0.AuxInt 39236 y := v_1_0.Args[0] 39237 if !(c&63 == 0) { 39238 break 39239 } 39240 v.reset(OpAMD64SHLQ) 39241 v.AddArg(x) 39242 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, 
t) 39243 v0.AddArg(y) 39244 v.AddArg(v0) 39245 return true 39246 } 39247 // match: (SHLQ x (ANDQconst [c] y)) 39248 // cond: c & 63 == 63 39249 // result: (SHLQ x y) 39250 for { 39251 _ = v.Args[1] 39252 x := v.Args[0] 39253 v_1 := v.Args[1] 39254 if v_1.Op != OpAMD64ANDQconst { 39255 break 39256 } 39257 c := v_1.AuxInt 39258 y := v_1.Args[0] 39259 if !(c&63 == 63) { 39260 break 39261 } 39262 v.reset(OpAMD64SHLQ) 39263 v.AddArg(x) 39264 v.AddArg(y) 39265 return true 39266 } 39267 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y))) 39268 // cond: c & 63 == 63 39269 // result: (SHLQ x (NEGQ <t> y)) 39270 for { 39271 _ = v.Args[1] 39272 x := v.Args[0] 39273 v_1 := v.Args[1] 39274 if v_1.Op != OpAMD64NEGQ { 39275 break 39276 } 39277 t := v_1.Type 39278 v_1_0 := v_1.Args[0] 39279 if v_1_0.Op != OpAMD64ANDQconst { 39280 break 39281 } 39282 c := v_1_0.AuxInt 39283 y := v_1_0.Args[0] 39284 if !(c&63 == 63) { 39285 break 39286 } 39287 v.reset(OpAMD64SHLQ) 39288 v.AddArg(x) 39289 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39290 v0.AddArg(y) 39291 v.AddArg(v0) 39292 return true 39293 } 39294 // match: (SHLQ x (ADDLconst [c] y)) 39295 // cond: c & 63 == 0 39296 // result: (SHLQ x y) 39297 for { 39298 _ = v.Args[1] 39299 x := v.Args[0] 39300 v_1 := v.Args[1] 39301 if v_1.Op != OpAMD64ADDLconst { 39302 break 39303 } 39304 c := v_1.AuxInt 39305 y := v_1.Args[0] 39306 if !(c&63 == 0) { 39307 break 39308 } 39309 v.reset(OpAMD64SHLQ) 39310 v.AddArg(x) 39311 v.AddArg(y) 39312 return true 39313 } 39314 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y))) 39315 // cond: c & 63 == 0 39316 // result: (SHLQ x (NEGL <t> y)) 39317 for { 39318 _ = v.Args[1] 39319 x := v.Args[0] 39320 v_1 := v.Args[1] 39321 if v_1.Op != OpAMD64NEGL { 39322 break 39323 } 39324 t := v_1.Type 39325 v_1_0 := v_1.Args[0] 39326 if v_1_0.Op != OpAMD64ADDLconst { 39327 break 39328 } 39329 c := v_1_0.AuxInt 39330 y := v_1_0.Args[0] 39331 if !(c&63 == 0) { 39332 break 39333 } 39334 v.reset(OpAMD64SHLQ) 39335 v.AddArg(x) 39336 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39337 v0.AddArg(y) 39338 v.AddArg(v0) 39339 return true 39340 } 39341 // match: (SHLQ x (ANDLconst [c] y)) 39342 // cond: c & 63 == 63 39343 // result: (SHLQ x y) 39344 for { 39345 _ = v.Args[1] 39346 x := v.Args[0] 39347 v_1 := v.Args[1] 39348 if v_1.Op != OpAMD64ANDLconst { 39349 break 39350 } 39351 c := v_1.AuxInt 39352 y := v_1.Args[0] 39353 if !(c&63 == 63) { 39354 break 39355 } 39356 v.reset(OpAMD64SHLQ) 39357 v.AddArg(x) 39358 v.AddArg(y) 39359 return true 39360 } 39361 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y))) 39362 // cond: c & 63 == 63 39363 // result: (SHLQ x (NEGL <t> y)) 39364 for { 39365 _ = v.Args[1] 39366 x := v.Args[0] 39367 v_1 := v.Args[1] 39368 if v_1.Op != OpAMD64NEGL { 39369 break 39370 } 39371 t := v_1.Type 39372 v_1_0 := v_1.Args[0] 39373 if v_1_0.Op != OpAMD64ANDLconst { 39374 break 39375 } 39376 c := v_1_0.AuxInt 39377 y := v_1_0.Args[0] 39378 if !(c&63 == 63) { 39379 break 39380 } 39381 v.reset(OpAMD64SHLQ) 39382 v.AddArg(x) 39383 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39384 v0.AddArg(y) 39385 v.AddArg(v0) 39386 return true 39387 } 39388 return false 39389 } 39390 func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { 39391 // match: (SHLQconst x [0]) 39392 // cond: 39393 // result: x 39394 for { 39395 if v.AuxInt != 0 { 39396 break 39397 } 39398 x := v.Args[0] 39399 v.reset(OpCopy) 39400 v.Type = x.Type 39401 v.AddArg(x) 39402 return true 39403 } 39404 return false 39405 } 39406 func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { 39407 // match: (SHRB 
x (MOVQconst [c])) 39408 // cond: c&31 < 8 39409 // result: (SHRBconst [c&31] x) 39410 for { 39411 _ = v.Args[1] 39412 x := v.Args[0] 39413 v_1 := v.Args[1] 39414 if v_1.Op != OpAMD64MOVQconst { 39415 break 39416 } 39417 c := v_1.AuxInt 39418 if !(c&31 < 8) { 39419 break 39420 } 39421 v.reset(OpAMD64SHRBconst) 39422 v.AuxInt = c & 31 39423 v.AddArg(x) 39424 return true 39425 } 39426 // match: (SHRB x (MOVLconst [c])) 39427 // cond: c&31 < 8 39428 // result: (SHRBconst [c&31] x) 39429 for { 39430 _ = v.Args[1] 39431 x := v.Args[0] 39432 v_1 := v.Args[1] 39433 if v_1.Op != OpAMD64MOVLconst { 39434 break 39435 } 39436 c := v_1.AuxInt 39437 if !(c&31 < 8) { 39438 break 39439 } 39440 v.reset(OpAMD64SHRBconst) 39441 v.AuxInt = c & 31 39442 v.AddArg(x) 39443 return true 39444 } 39445 // match: (SHRB _ (MOVQconst [c])) 39446 // cond: c&31 >= 8 39447 // result: (MOVLconst [0]) 39448 for { 39449 _ = v.Args[1] 39450 v_1 := v.Args[1] 39451 if v_1.Op != OpAMD64MOVQconst { 39452 break 39453 } 39454 c := v_1.AuxInt 39455 if !(c&31 >= 8) { 39456 break 39457 } 39458 v.reset(OpAMD64MOVLconst) 39459 v.AuxInt = 0 39460 return true 39461 } 39462 // match: (SHRB _ (MOVLconst [c])) 39463 // cond: c&31 >= 8 39464 // result: (MOVLconst [0]) 39465 for { 39466 _ = v.Args[1] 39467 v_1 := v.Args[1] 39468 if v_1.Op != OpAMD64MOVLconst { 39469 break 39470 } 39471 c := v_1.AuxInt 39472 if !(c&31 >= 8) { 39473 break 39474 } 39475 v.reset(OpAMD64MOVLconst) 39476 v.AuxInt = 0 39477 return true 39478 } 39479 return false 39480 } 39481 func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool { 39482 // match: (SHRBconst x [0]) 39483 // cond: 39484 // result: x 39485 for { 39486 if v.AuxInt != 0 { 39487 break 39488 } 39489 x := v.Args[0] 39490 v.reset(OpCopy) 39491 v.Type = x.Type 39492 v.AddArg(x) 39493 return true 39494 } 39495 return false 39496 } 39497 func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { 39498 b := v.Block 39499 _ = b 39500 // match: (SHRL x (MOVQconst [c])) 39501 // cond: 39502 // result: (SHRLconst [c&31] x) 39503 for { 39504 _ = v.Args[1] 39505 x := v.Args[0] 39506 v_1 := v.Args[1] 39507 if v_1.Op != OpAMD64MOVQconst { 39508 break 39509 } 39510 c := v_1.AuxInt 39511 v.reset(OpAMD64SHRLconst) 39512 v.AuxInt = c & 31 39513 v.AddArg(x) 39514 return true 39515 } 39516 // match: (SHRL x (MOVLconst [c])) 39517 // cond: 39518 // result: (SHRLconst [c&31] x) 39519 for { 39520 _ = v.Args[1] 39521 x := v.Args[0] 39522 v_1 := v.Args[1] 39523 if v_1.Op != OpAMD64MOVLconst { 39524 break 39525 } 39526 c := v_1.AuxInt 39527 v.reset(OpAMD64SHRLconst) 39528 v.AuxInt = c & 31 39529 v.AddArg(x) 39530 return true 39531 } 39532 // match: (SHRL x (ADDQconst [c] y)) 39533 // cond: c & 31 == 0 39534 // result: (SHRL x y) 39535 for { 39536 _ = v.Args[1] 39537 x := v.Args[0] 39538 v_1 := v.Args[1] 39539 if v_1.Op != OpAMD64ADDQconst { 39540 break 39541 } 39542 c := v_1.AuxInt 39543 y := v_1.Args[0] 39544 if !(c&31 == 0) { 39545 break 39546 } 39547 v.reset(OpAMD64SHRL) 39548 v.AddArg(x) 39549 v.AddArg(y) 39550 return true 39551 } 39552 // match: (SHRL x (NEGQ <t> (ADDQconst [c] y))) 39553 // cond: c & 31 == 0 39554 // result: (SHRL x (NEGQ <t> y)) 39555 for { 39556 _ = v.Args[1] 39557 x := v.Args[0] 39558 v_1 := v.Args[1] 39559 if v_1.Op != OpAMD64NEGQ { 39560 break 39561 } 39562 t := v_1.Type 39563 v_1_0 := v_1.Args[0] 39564 if v_1_0.Op != OpAMD64ADDQconst { 39565 break 39566 } 39567 c := v_1_0.AuxInt 39568 y := v_1_0.Args[0] 39569 if !(c&31 == 0) { 39570 break 39571 } 39572 v.reset(OpAMD64SHRL) 39573 v.AddArg(x) 39574 v0 := 
b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39575 v0.AddArg(y) 39576 v.AddArg(v0) 39577 return true 39578 } 39579 // match: (SHRL x (ANDQconst [c] y)) 39580 // cond: c & 31 == 31 39581 // result: (SHRL x y) 39582 for { 39583 _ = v.Args[1] 39584 x := v.Args[0] 39585 v_1 := v.Args[1] 39586 if v_1.Op != OpAMD64ANDQconst { 39587 break 39588 } 39589 c := v_1.AuxInt 39590 y := v_1.Args[0] 39591 if !(c&31 == 31) { 39592 break 39593 } 39594 v.reset(OpAMD64SHRL) 39595 v.AddArg(x) 39596 v.AddArg(y) 39597 return true 39598 } 39599 // match: (SHRL x (NEGQ <t> (ANDQconst [c] y))) 39600 // cond: c & 31 == 31 39601 // result: (SHRL x (NEGQ <t> y)) 39602 for { 39603 _ = v.Args[1] 39604 x := v.Args[0] 39605 v_1 := v.Args[1] 39606 if v_1.Op != OpAMD64NEGQ { 39607 break 39608 } 39609 t := v_1.Type 39610 v_1_0 := v_1.Args[0] 39611 if v_1_0.Op != OpAMD64ANDQconst { 39612 break 39613 } 39614 c := v_1_0.AuxInt 39615 y := v_1_0.Args[0] 39616 if !(c&31 == 31) { 39617 break 39618 } 39619 v.reset(OpAMD64SHRL) 39620 v.AddArg(x) 39621 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39622 v0.AddArg(y) 39623 v.AddArg(v0) 39624 return true 39625 } 39626 // match: (SHRL x (ADDLconst [c] y)) 39627 // cond: c & 31 == 0 39628 // result: (SHRL x y) 39629 for { 39630 _ = v.Args[1] 39631 x := v.Args[0] 39632 v_1 := v.Args[1] 39633 if v_1.Op != OpAMD64ADDLconst { 39634 break 39635 } 39636 c := v_1.AuxInt 39637 y := v_1.Args[0] 39638 if !(c&31 == 0) { 39639 break 39640 } 39641 v.reset(OpAMD64SHRL) 39642 v.AddArg(x) 39643 v.AddArg(y) 39644 return true 39645 } 39646 // match: (SHRL x (NEGL <t> (ADDLconst [c] y))) 39647 // cond: c & 31 == 0 39648 // result: (SHRL x (NEGL <t> y)) 39649 for { 39650 _ = v.Args[1] 39651 x := v.Args[0] 39652 v_1 := v.Args[1] 39653 if v_1.Op != OpAMD64NEGL { 39654 break 39655 } 39656 t := v_1.Type 39657 v_1_0 := v_1.Args[0] 39658 if v_1_0.Op != OpAMD64ADDLconst { 39659 break 39660 } 39661 c := v_1_0.AuxInt 39662 y := v_1_0.Args[0] 39663 if !(c&31 == 0) { 39664 break 39665 } 39666 v.reset(OpAMD64SHRL) 39667 v.AddArg(x) 39668 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39669 v0.AddArg(y) 39670 v.AddArg(v0) 39671 return true 39672 } 39673 // match: (SHRL x (ANDLconst [c] y)) 39674 // cond: c & 31 == 31 39675 // result: (SHRL x y) 39676 for { 39677 _ = v.Args[1] 39678 x := v.Args[0] 39679 v_1 := v.Args[1] 39680 if v_1.Op != OpAMD64ANDLconst { 39681 break 39682 } 39683 c := v_1.AuxInt 39684 y := v_1.Args[0] 39685 if !(c&31 == 31) { 39686 break 39687 } 39688 v.reset(OpAMD64SHRL) 39689 v.AddArg(x) 39690 v.AddArg(y) 39691 return true 39692 } 39693 // match: (SHRL x (NEGL <t> (ANDLconst [c] y))) 39694 // cond: c & 31 == 31 39695 // result: (SHRL x (NEGL <t> y)) 39696 for { 39697 _ = v.Args[1] 39698 x := v.Args[0] 39699 v_1 := v.Args[1] 39700 if v_1.Op != OpAMD64NEGL { 39701 break 39702 } 39703 t := v_1.Type 39704 v_1_0 := v_1.Args[0] 39705 if v_1_0.Op != OpAMD64ANDLconst { 39706 break 39707 } 39708 c := v_1_0.AuxInt 39709 y := v_1_0.Args[0] 39710 if !(c&31 == 31) { 39711 break 39712 } 39713 v.reset(OpAMD64SHRL) 39714 v.AddArg(x) 39715 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39716 v0.AddArg(y) 39717 v.AddArg(v0) 39718 return true 39719 } 39720 return false 39721 } 39722 func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { 39723 // match: (SHRLconst x [0]) 39724 // cond: 39725 // result: x 39726 for { 39727 if v.AuxInt != 0 { 39728 break 39729 } 39730 x := v.Args[0] 39731 v.reset(OpCopy) 39732 v.Type = x.Type 39733 v.AddArg(x) 39734 return true 39735 } 39736 return false 39737 } 39738 func rewriteValueAMD64_OpAMD64SHRQ_0(v 
*Value) bool { 39739 b := v.Block 39740 _ = b 39741 // match: (SHRQ x (MOVQconst [c])) 39742 // cond: 39743 // result: (SHRQconst [c&63] x) 39744 for { 39745 _ = v.Args[1] 39746 x := v.Args[0] 39747 v_1 := v.Args[1] 39748 if v_1.Op != OpAMD64MOVQconst { 39749 break 39750 } 39751 c := v_1.AuxInt 39752 v.reset(OpAMD64SHRQconst) 39753 v.AuxInt = c & 63 39754 v.AddArg(x) 39755 return true 39756 } 39757 // match: (SHRQ x (MOVLconst [c])) 39758 // cond: 39759 // result: (SHRQconst [c&63] x) 39760 for { 39761 _ = v.Args[1] 39762 x := v.Args[0] 39763 v_1 := v.Args[1] 39764 if v_1.Op != OpAMD64MOVLconst { 39765 break 39766 } 39767 c := v_1.AuxInt 39768 v.reset(OpAMD64SHRQconst) 39769 v.AuxInt = c & 63 39770 v.AddArg(x) 39771 return true 39772 } 39773 // match: (SHRQ x (ADDQconst [c] y)) 39774 // cond: c & 63 == 0 39775 // result: (SHRQ x y) 39776 for { 39777 _ = v.Args[1] 39778 x := v.Args[0] 39779 v_1 := v.Args[1] 39780 if v_1.Op != OpAMD64ADDQconst { 39781 break 39782 } 39783 c := v_1.AuxInt 39784 y := v_1.Args[0] 39785 if !(c&63 == 0) { 39786 break 39787 } 39788 v.reset(OpAMD64SHRQ) 39789 v.AddArg(x) 39790 v.AddArg(y) 39791 return true 39792 } 39793 // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y))) 39794 // cond: c & 63 == 0 39795 // result: (SHRQ x (NEGQ <t> y)) 39796 for { 39797 _ = v.Args[1] 39798 x := v.Args[0] 39799 v_1 := v.Args[1] 39800 if v_1.Op != OpAMD64NEGQ { 39801 break 39802 } 39803 t := v_1.Type 39804 v_1_0 := v_1.Args[0] 39805 if v_1_0.Op != OpAMD64ADDQconst { 39806 break 39807 } 39808 c := v_1_0.AuxInt 39809 y := v_1_0.Args[0] 39810 if !(c&63 == 0) { 39811 break 39812 } 39813 v.reset(OpAMD64SHRQ) 39814 v.AddArg(x) 39815 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39816 v0.AddArg(y) 39817 v.AddArg(v0) 39818 return true 39819 } 39820 // match: (SHRQ x (ANDQconst [c] y)) 39821 // cond: c & 63 == 63 39822 // result: (SHRQ x y) 39823 for { 39824 _ = v.Args[1] 39825 x := v.Args[0] 39826 v_1 := v.Args[1] 39827 if v_1.Op != OpAMD64ANDQconst { 39828 break 39829 } 39830 c := v_1.AuxInt 39831 y := v_1.Args[0] 39832 if !(c&63 == 63) { 39833 break 39834 } 39835 v.reset(OpAMD64SHRQ) 39836 v.AddArg(x) 39837 v.AddArg(y) 39838 return true 39839 } 39840 // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y))) 39841 // cond: c & 63 == 63 39842 // result: (SHRQ x (NEGQ <t> y)) 39843 for { 39844 _ = v.Args[1] 39845 x := v.Args[0] 39846 v_1 := v.Args[1] 39847 if v_1.Op != OpAMD64NEGQ { 39848 break 39849 } 39850 t := v_1.Type 39851 v_1_0 := v_1.Args[0] 39852 if v_1_0.Op != OpAMD64ANDQconst { 39853 break 39854 } 39855 c := v_1_0.AuxInt 39856 y := v_1_0.Args[0] 39857 if !(c&63 == 63) { 39858 break 39859 } 39860 v.reset(OpAMD64SHRQ) 39861 v.AddArg(x) 39862 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39863 v0.AddArg(y) 39864 v.AddArg(v0) 39865 return true 39866 } 39867 // match: (SHRQ x (ADDLconst [c] y)) 39868 // cond: c & 63 == 0 39869 // result: (SHRQ x y) 39870 for { 39871 _ = v.Args[1] 39872 x := v.Args[0] 39873 v_1 := v.Args[1] 39874 if v_1.Op != OpAMD64ADDLconst { 39875 break 39876 } 39877 c := v_1.AuxInt 39878 y := v_1.Args[0] 39879 if !(c&63 == 0) { 39880 break 39881 } 39882 v.reset(OpAMD64SHRQ) 39883 v.AddArg(x) 39884 v.AddArg(y) 39885 return true 39886 } 39887 // match: (SHRQ x (NEGL <t> (ADDLconst [c] y))) 39888 // cond: c & 63 == 0 39889 // result: (SHRQ x (NEGL <t> y)) 39890 for { 39891 _ = v.Args[1] 39892 x := v.Args[0] 39893 v_1 := v.Args[1] 39894 if v_1.Op != OpAMD64NEGL { 39895 break 39896 } 39897 t := v_1.Type 39898 v_1_0 := v_1.Args[0] 39899 if v_1_0.Op != OpAMD64ADDLconst { 39900 break 39901 } 39902 c 
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
	// match: (SHRQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool {
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool {
	// match: (SHRWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	return false
}
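// A note on the SHRQ/SHRW rules above (hand-written illustration, not part of
// the generated rules): AMD64 shift instructions use only the low 6 bits of a
// 64-bit shift count (5 bits for 32-bit shifts), so a constant count folds
// into SHRQconst as c&63, adding a multiple of 64 to the count is a no-op,
// and an AND that preserves the low 6 bits (c&63 == 63) can be dropped:
//
//	(SHRQ x (ADDQconst [64] y)) -> (SHRQ x y) // 64&63 == 0
//	(SHRQ x (ANDQconst [63] y)) -> (SHRQ x y) // the CPU masks the count anyway
//
// The SHRW rules are the 16-bit special case: a constant count of 16 or more
// shifts out every bit of the operand, so the result is rewritten to 0.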
func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBL x (MOVLconst [c]))
	// cond:
	// result: (SUBLconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// cond:
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool {
	// match: (SUBLconst [c] x)
	// cond: int32(c) == 0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// cond:
	// result: (ADDLconst [int64(int32(-c))] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(-c))
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
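// The (SUBL x l:(MOVLload ...)) rule above is the generic load-folding
// pattern used throughout this file: when the load l can legally be folded
// into v (canMergeLoad checks, among other things, that l has no other users
// and that no conflicting memory operation intervenes), the pair becomes a
// single op with a memory operand, and clobber(l) marks the load dead so it
// can be removed. Sketch (illustration only):
//
//	(SUBL x (MOVLload [8] {s} ptr mem)) -> (SUBLmem x [8] {s} ptr mem)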
func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [c])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [c]))
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = c
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool {
	// match: (SUBQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// cond:
	// result: (MOVQconst [d-c])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d - c
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(-c-d)
	// result: (ADDQconst [-c-d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(-c - d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c - d
		v.AddArg(x)
		return true
	}
	return false
}
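// The c != -(1<<31) guard in the second SUBQconst rule above is an overflow
// check: the rewrite replaces "x - c" with "x + (-c)", and the immediate must
// still fit in 32 bits (sign-extended). Negating -1<<31 would yield 1<<31,
// which does not fit, so that single value stays a SUBQconst. For every
// other in-range c the two forms are identical, e.g.:
//
//	(SUBQconst [5] x) -> (ADDQconst [-5] x)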
func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool {
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool {
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (SUBSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
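// SUBLmem/SUBQmem/SUBSDmem/SUBSSmem above share a store-to-load-forwarding
// pattern: when the memory operand is provably the value y just stored at the
// same [off] {sym} ptr, the memory access is skipped and y is moved between
// the integer and floating-point register files instead, using MOVLf2i /
// MOVQf2i (float bits to int) or MOVLi2f / MOVQi2f (int bits to float).
// Sketch (illustration only):
//
//	(SUBSDmem x [o] {s} p (MOVQstore [o] {s} p y _)) -> (SUBSD x (MOVQi2f y))
//
// which avoids a round trip through memory for the freshly stored value.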
func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool {
	// match: (TESTB (MOVLconst [c]) x)
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTB x (MOVLconst [c]))
	// cond:
	// result: (TESTBconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
	// match: (TESTL (MOVLconst [c]) x)
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTL x (MOVLconst [c]))
	// cond:
	// result: (TESTLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (TESTQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
	// match: (TESTW (MOVLconst [c]) x)
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (TESTW x (MOVLconst [c]))
	// cond:
	// result: (TESTWconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
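// TESTB/TESTW/TESTL above accept any constant operand directly, but the
// TESTQ rules need is32Bit(c): x86-64 has no TEST form with a 64-bit
// immediate, so only constants that survive sign-extension from 32 bits can
// become a TESTQconst. A wider mask keeps its MOVQconst operand. Sketch
// (illustration only):
//
//	(TESTQ x (MOVQconst [42]))    -> (TESTQconst [42] x)
//	(TESTQ x (MOVQconst [1<<33]))  // unchanged: immediate does not fit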
func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool {
	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDLlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool {
	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XADDQlock)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool {
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
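// XADDLlock/XADDQlock/XCHGL above fold address arithmetic into the
// instruction's addressing mode: an ADDQconst on the pointer becomes part of
// the displacement (provided the combined offset still fits in 32 bits), and
// a LEAQ additionally contributes its symbol, merged via mergeSym. Sketch
// (illustration only):
//
//	(XCHGL [4] {s} val (ADDQconst [8] p) mem) -> (XCHGL [12] {s} val p mem)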
func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool {
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
	// match: (XORL x (MOVLconst [c]))
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond:
	// result: (XORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORL x x)
	// cond:
	// result: (MOVLconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
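// The ROLLconst/ROLWconst/ROLBconst rules above recognize a rotate spelled as
// an XOR of two opposite shifts of the same value: x<<c ^ x>>(w-c) rotates x
// left by c when w is the operand width in bits. The t.Size() conditions on
// the 16- and 8-bit forms check the declared width of the result, since the
// narrow ops are carried out on 32-bit registers. Source-level sketch
// (illustration only):
//
//	// for a uint32 x, recognized as ROLL $5
//	y := x<<5 ^ x>>27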
func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
	// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool {
	// match: (XORLconst [1] (SETNE x))
	// cond:
	// result: (SETEQ x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// cond:
	// result: (SETNE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// cond:
	// result: (SETGE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// cond:
	// result: (SETL x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// cond:
	// result: (SETG x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// cond:
	// result: (SETLE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// cond:
	// result: (SETAE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// cond:
	// result: (SETB x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// cond:
	// result: (SETA x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETA x))
	// cond:
	// result: (SETBE x)
	for {
		if v.AuxInt != 1 {
			break
		}
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	return false
}
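// XORLconst [1] applied to a SETcc flips a boolean result, so rather than
// emit the XOR, the rules above invert the condition code itself: each
// predicate maps to its negation (SETNE/SETEQ, SETL/SETGE, SETLE/SETG, and
// the unsigned pairs SETB/SETAE, SETBE/SETA). Sketch (illustration only):
//
//	(XORLconst [1] (SETL x)) -> (SETGE x) // !(a < b) == (a >= b)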
func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
	// match: (XORLconst [c] (XORLconst [d] x))
	// cond:
	// result: (XORLconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (XORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (XORQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (XORQ x x)
	// cond:
	// result: (MOVQconst [0])
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (XORQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64XORQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
	// match: (XORQconst [c] (XORQconst [d] x))
	// cond:
	// result: (XORQconst [c ^ d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = c ^ d
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c^d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c ^ d
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (XORQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
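// From this point on, the functions lower architecture-independent SSA ops
// to AMD64 ones rather than optimize existing machine ops. The integer adds
// below map the 8-, 16-, and 32-bit variants all to ADDL, since only the low
// bits of a 32-bit operation matter for the narrower types, while Add64 maps
// to ADDQ. Sketch (illustration only):
//
//	(Add16 x y) -> (ADDL x y)
//	(Add64 x y) -> (ADDQ x y)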
func rewriteValueAMD64_OpAdd16_0(v *Value) bool {
	// match: (Add16 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32_0(v *Value) bool {
	// match: (Add32 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
	// match: (Add32F x y)
	// cond:
	// result: (ADDSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
	// match: (Add64 x y)
	// cond:
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
	// match: (Add64F x y)
	// cond:
	// result: (ADDSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
	// match: (Add8 x y)
	// cond:
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AddPtr x y)
	// cond: config.PtrSize == 8
	// result: (ADDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (AddPtr x y)
	// cond: config.PtrSize == 4
	// result: (ADDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAddr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (Addr {sym} base)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
	// match: (And16 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
	// match: (And32 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
	// match: (And64 x y)
	// cond:
	// result: (ANDQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
	// match: (And8 x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAndB_0(v *Value) bool {
	// match: (AndB x y)
	// cond:
	// result: (ANDL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd32 ptr val mem)
	// cond:
	// result: (AddTupleFirst32 val (XADDLlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst32)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicAdd64 ptr val mem)
	// cond:
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64AddTupleFirst64)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
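// The atomic-add lowerings above rely on LOCK XADD, which returns the old
// value of the memory word, while Go's atomic add must return the new value.
// AddTupleFirst32/64 is a pseudo-op that later adds val to the first element
// of the (old value, memory) tuple produced by XADDLlock/XADDQlock. Sketch
// (illustration only):
//
//	(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 val (XADDLlock val ptr mem))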
func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
	// match: (AtomicAnd8 ptr val mem)
	// cond:
	// result: (ANDBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ANDBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// cond:
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		_ = v.Args[3]
		ptr := v.Args[0]
		old := v.Args[1]
		new_ := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg(ptr)
		v.AddArg(old)
		v.AddArg(new_)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
	// match: (AtomicExchange32 ptr val mem)
	// cond:
	// result: (XCHGL val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGL)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
	// match: (AtomicExchange64 ptr val mem)
	// cond:
	// result: (XCHGQ val ptr mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64XCHGQ)
		v.AddArg(val)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
	// match: (AtomicLoad32 ptr mem)
	// cond:
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
	// match: (AtomicLoad64 ptr mem)
	// cond:
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 8
	// result: (MOVQatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (AtomicLoadPtr ptr mem)
	// cond: config.PtrSize == 4
	// result: (MOVLatomicload ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
	// match: (AtomicOr8 ptr val mem)
	// cond:
	// result: (ORBlock ptr val mem)
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64ORBlock)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore32 ptr val mem)
	// cond:
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStore64 ptr val mem)
	// cond:
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 8
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (AtomicStorePtrNoWB ptr val mem)
	// cond: config.PtrSize == 4
	// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg(val)
		v0.AddArg(ptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
	// match: (Avg64u x y)
	// cond:
	// result: (AVGQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64AVGQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen32 x)
	// cond:
	// result: (BitLen64 (MOVLQZX <typ.UInt64> x))
	for {
		x := v.Args[0]
		v.reset(OpBitLen64)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (BitLen64 <t> x)
	// cond:
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = 1
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = -1
		v0.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v5.AddArg(x)
		v4.AddArg(v5)
		v0.AddArg(v4)
		v.AddArg(v0)
		return true
	}
}
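// BitLen64 above computes 1 + the index of the highest set bit. BSRQ returns
// that index but is undefined on a zero input (setting ZF instead), so the
// CMOVQEQ substitutes -1 in that case and the overall result becomes
// 1 + (-1) = 0 for BitLen64(0). BitLen32 zero-extends its argument and
// reuses the 64-bit path. For example (illustration only):
//
//	BitLen64(0) == 0, BitLen64(1) == 1, BitLen64(1<<40) == 41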
func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
	// match: (Bswap32 x)
	// cond:
	// result: (BSWAPL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
	// match: (Bswap64 x)
	// cond:
	// result: (BSWAPQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64BSWAPQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCeil_0(v *Value) bool {
	// match: (Ceil x)
	// cond:
	// result: (ROUNDSD [2] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 2
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
	// match: (ClosureCall [argwid] entry closure mem)
	// cond:
	// result: (CALLclosure [argwid] entry closure mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[2]
		entry := v.Args[0]
		closure := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64CALLclosure)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(closure)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpCom16_0(v *Value) bool {
	// match: (Com16 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom32_0(v *Value) bool {
	// match: (Com32 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom64_0(v *Value) bool {
	// match: (Com64 x)
	// cond:
	// result: (NOTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCom8_0(v *Value) bool {
	// match: (Com8 x)
	// cond:
	// result: (NOTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NOTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpConst16_0(v *Value) bool {
	// match: (Const16 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32_0(v *Value) bool {
	// match: (Const32 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst32F_0(v *Value) bool {
	// match: (Const32F [val])
	// cond:
	// result: (MOVSSconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSSconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64_0(v *Value) bool {
	// match: (Const64 [val])
	// cond:
	// result: (MOVQconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst64F_0(v *Value) bool {
	// match: (Const64F [val])
	// cond:
	// result: (MOVSDconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVSDconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConst8_0(v *Value) bool {
	// match: (Const8 [val])
	// cond:
	// result: (MOVLconst [val])
	for {
		val := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = val
		return true
	}
}
func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
	// match: (ConstBool [b])
	// cond:
	// result: (MOVLconst [b])
	for {
		b := v.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = b
		return true
	}
}
func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (ConstNil)
	// cond: config.PtrSize == 8
	// result: (MOVQconst [0])
	for {
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (ConstNil)
	// cond: config.PtrSize == 4
	// result: (MOVLconst [0])
	for {
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpConvert_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 8
	// result: (MOVQconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (Convert <t> x mem)
	// cond: config.PtrSize == 4
	// result: (MOVLconvert <t> x mem)
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		mem := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLconvert)
		v.Type = t
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz32 x)
	// cond:
	// result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = 1 << 32
		v1.AddArg(v2)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
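// Ctz64 above handles BSFQ's undefined result on a zero input with a CMOVQEQ
// that substitutes 64. Ctz32 avoids the CMOV entirely by ORing in a guard
// bit at position 32 first: the BSFQ input is then never zero, and a zero
// 32-bit input correctly produces 32. Sketch (illustration only):
//
//	Ctz32(x) = BSFQ(x | 1<<32) // low 32 bits all zero => result 32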
func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz32 x)
	// cond:
	// result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x)))
	for {
		x := v.Args[0]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = 1 << 32
		v1.AddArg(v2)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Ctz64 <t> x)
	// cond:
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = 64
		v.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v4.AddArg(x)
		v3.AddArg(v4)
		v.AddArg(v3)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool {
	// match: (Cvt32Fto32 x)
	// cond:
	// result: (CVTTSS2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool {
	// match: (Cvt32Fto64 x)
	// cond:
	// result: (CVTTSS2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSS2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool {
	// match: (Cvt32Fto64F x)
	// cond:
	// result: (CVTSS2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSS2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool {
	// match: (Cvt32to32F x)
	// cond:
	// result: (CVTSL2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool {
	// match: (Cvt32to64F x)
	// cond:
	// result: (CVTSL2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSL2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool {
	// match: (Cvt64Fto32 x)
	// cond:
	// result: (CVTTSD2SL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool {
	// match: (Cvt64Fto32F x)
	// cond:
	// result: (CVTSD2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSD2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool {
	// match: (Cvt64Fto64 x)
	// cond:
	// result: (CVTTSD2SQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTTSD2SQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool {
	// match: (Cvt64to32F x)
	// cond:
	// result: (CVTSQ2SS x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SS)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool {
	// match: (Cvt64to64F x)
	// cond:
	// result: (CVTSQ2SD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64CVTSQ2SD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
	// match: (Div128u xhi xlo y)
	// cond:
	// result: (DIVQU2 xhi xlo y)
	for {
		_ = v.Args[2]
		xhi := v.Args[0]
		xlo := v.Args[1]
		y := v.Args[2]
		v.reset(OpAMD64DIVQU2)
		v.AddArg(xhi)
		v.AddArg(xlo)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16 x y)
	// cond:
	// result: (Select0 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div16u x y)
	// cond:
	// result: (Select0 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32 x y)
	// cond:
	// result: (Select0 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
	// match: (Div32F x y)
	// cond:
	// result: (DIVSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div32u x y)
	// cond:
	// result: (Select0 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64 x y)
	// cond:
	// result: (Select0 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
	// match: (Div64F x y)
	// cond:
	// result: (DIVSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64DIVSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div64u x y)
	// cond:
	// result: (Select0 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
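// Editorial note: there is no 8-bit divide in this lowering. Div8, Div8u,
// and the matching Mod8/Mod8u rules widen both operands to 16 bits
// (sign-extending for the signed forms, zero-extending for the unsigned
// ones) and reuse DIVW/DIVWU, whose tuple result carries the quotient in
// Select0 and the remainder in Select1.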
func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8 x y)
	// cond:
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Div8u x y)
	// cond:
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq16 x y)
	// cond:
	// result: (SETEQ (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32 x y)
	// cond:
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
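// Editorial note: integer equality lowers to plain SETEQ, but the float
// comparisons below use the separate SETEQF pseudo-op. UCOMISS/UCOMISD
// report an unordered result (a NaN operand) via the parity flag, and
// SETEQF folds that check in so that a comparison involving NaN comes out
// false, as Go requires.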
func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq32F x y)
	// cond:
	// result: (SETEQF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64 x y)
	// cond:
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq64F x y)
	// cond:
	// result: (SETEQF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Eq8 x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (EqB x y)
	// cond:
	// result: (SETEQ (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (EqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETEQ (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (EqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETEQ (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
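// Editorial note: ROUNDSD's AuxInt is the SSE4.1 rounding-mode immediate:
// 0 = nearest, 1 = toward -inf, 2 = toward +inf, 3 = toward zero. Floor is
// therefore ROUNDSD [1]; the Ceil and Trunc lowerings elsewhere in this
// file (if present in this build) use 2 and 3.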
func rewriteValueAMD64_OpFloor_0(v *Value) bool {
	// match: (Floor x)
	// cond:
	// result: (ROUNDSD [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16 x y)
	// cond:
	// result: (SETGE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq16U x y)
	// cond:
	// result: (SETAE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32 x y)
	// cond:
	// result: (SETGE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq32U x y)
	// cond:
	// result: (SETAE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64 x y)
	// cond:
	// result: (SETGE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq64U x y)
	// cond:
	// result: (SETAE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8 x y)
	// cond:
	// result: (SETGE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Geq8U x y)
	// cond:
	// result: (SETAE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool {
	// match: (GetCallerPC)
	// cond:
	// result: (LoweredGetCallerPC)
	for {
		v.reset(OpAMD64LoweredGetCallerPC)
		return true
	}
}
func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool {
	// match: (GetCallerSP)
	// cond:
	// result: (LoweredGetCallerSP)
	for {
		v.reset(OpAMD64LoweredGetCallerSP)
		return true
	}
}
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
	// match: (GetClosurePtr)
	// cond:
	// result: (LoweredGetClosurePtr)
	for {
		v.reset(OpAMD64LoweredGetClosurePtr)
		return true
	}
}
func rewriteValueAMD64_OpGetG_0(v *Value) bool {
	// match: (GetG mem)
	// cond:
	// result: (LoweredGetG mem)
	for {
		mem := v.Args[0]
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16 x y)
	// cond:
	// result: (SETG (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater16U x y)
	// cond:
	// result: (SETA (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32 x y)
	// cond:
	// result: (SETG (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32F x y)
	// cond:
	// result: (SETGF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater32U x y)
	// cond:
	// result: (SETA (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64 x y)
	// cond:
	// result: (SETG (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64F x y)
	// cond:
	// result: (SETGF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater64U x y)
	// cond:
	// result: (SETA (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8 x y)
	// cond:
	// result: (SETG (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Greater8U x y)
	// cond:
	// result: (SETA (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
	// match: (Hmul32 x y)
	// cond:
	// result: (HMULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
	// match: (Hmul32u x y)
	// cond:
	// result: (HMULLU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULLU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
	// match: (Hmul64 x y)
	// cond:
	// result: (HMULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
	// match: (Hmul64u x y)
	// cond:
	// result: (HMULQU x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64HMULQU)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool {
	// match: (Int64Hi x)
	// cond:
	// result: (SHRQconst [32] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = 32
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
	// match: (InterCall [argwid] entry mem)
	// cond:
	// result: (CALLinter [argwid] entry mem)
	for {
		argwid := v.AuxInt
		_ = v.Args[1]
		entry := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64CALLinter)
		v.AuxInt = argwid
		v.AddArg(entry)
		v.AddArg(mem)
		return true
	}
}
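// Editorial note: bounds checks use the unsigned conditions SETB
// (idx < len) and, for IsSliceInBounds, SETBE (idx <= len). Treating the
// index as unsigned lets a single compare reject negative indices as well,
// since they wrap to values far above any valid length.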
func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETB (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETB (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsNonNil p)
	// cond: config.PtrSize == 8
	// result: (SETNE (TESTQ p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	// match: (IsNonNil p)
	// cond: config.PtrSize == 4
	// result: (SETNE (TESTL p p))
	for {
		p := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
		v0.AddArg(p)
		v0.AddArg(p)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 8
	// result: (SETBE (CMPQ idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	// match: (IsSliceInBounds idx len)
	// cond: config.PtrSize == 4
	// result: (SETBE (CMPL idx len))
	for {
		_ = v.Args[1]
		idx := v.Args[0]
		len := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(idx)
		v0.AddArg(len)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16 x y)
	// cond:
	// result: (SETLE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq16U x y)
	// cond:
	// result: (SETBE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32 x y)
	// cond:
	// result: (SETLE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
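// Editorial note: for floats, only the "above"-style conditions compose
// cleanly with the NaN (parity) check that UCOMISS/UCOMISD produce, so
// x <= y is lowered as y >= x: the operands are swapped into (UCOMISS y x)
// and the result taken with SETGEF. The Less32F/Less64F rules below apply
// the same swap with SETGF.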
func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32F x y)
	// cond:
	// result: (SETGEF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq32U x y)
	// cond:
	// result: (SETBE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64 x y)
	// cond:
	// result: (SETLE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64F x y)
	// cond:
	// result: (SETGEF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq64U x y)
	// cond:
	// result: (SETBE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8 x y)
	// cond:
	// result: (SETLE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Leq8U x y)
	// cond:
	// result: (SETBE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16 x y)
	// cond:
	// result: (SETL (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less16U x y)
	// cond:
	// result: (SETB (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32 x y)
	// cond:
	// result: (SETL (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32F x y)
	// cond:
	// result: (SETGF (UCOMISS y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less32U x y)
	// cond:
	// result: (SETB (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64 x y)
	// cond:
	// result: (SETL (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64F x y)
	// cond:
	// result: (SETGF (UCOMISD y x))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(y)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less64U x y)
	// cond:
	// result: (SETB (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8 x y)
	// cond:
	// result: (SETL (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Less8U x y)
	// cond:
	// result: (SETB (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
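// Editorial note: Load dispatches purely on the value's type. Eight-byte
// integers (and pointers on 8-byte-pointer configs) use MOVQload, 4-byte
// integers and 4-byte pointers MOVLload, then MOVWload and MOVBload for the
// narrower integers (booleans load as bytes), with MOVSSload/MOVSDload for
// the two float widths.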
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
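// Editorial note: Go defines x << y as 0 once y reaches the operand width,
// but the hardware SHLL/SHLQ mask the shift count. Each Lsh lowering below
// therefore ANDs the raw shift with SBB*carrymask(CMP*const y [width]): the
// compare sets the carry flag exactly when y < width, the SBB materializes
// all-ones from that carry (all-zeros otherwise), and the AND keeps or
// clears the shifted value. The 8-, 16-, and 32-bit shifts are performed in
// 32-bit registers, so they all compare against 32.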
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh16x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh32x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x16 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x32 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x64 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh64x8 <t> x y)
	// cond:
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x16 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x32 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x64 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Lsh8x8 <t> x y)
	// cond:
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16 x y)
	// cond:
	// result: (Select1 (DIVW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32 x y)
	// cond:
	// result: (Select1 (DIVL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64 x y)
	// cond:
	// result: (Select1 (DIVQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
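// Editorial note: Move lowering picks a strategy by copy size. Sizes 0-8
// (and the odd 3/5/6/7-byte cases) become one or two plain load/store
// pairs; 16 bytes uses a single SSE MOVO pair when config.useSSE allows,
// else two MOVQ pairs; mid-size copies peel off the unaligned tail and
// recurse on an aligned remainder; and rewriteValueAMD64_OpMove_10 finishes
// with DUFFCOPY or REPMOVSQ for large blocks.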
func rewriteValueAMD64_OpMove_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [0] _ _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[2]
		mem := v.Args[2]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// cond:
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// cond:
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// cond:
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// cond:
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: config.useSSE
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: !config.useSSE
	// result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [3] dst src mem)
	// cond:
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 2
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 2
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// cond:
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// cond:
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = 4
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = 4
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpMove_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Move [7] dst src mem)
	// cond:
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = 3
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = 3
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 8 && s < 16
	// result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 8 && s < 16) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = s - 8
		v.AddArg(dst)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = s - 8
		v0.AddArg(src)
		v0.AddArg(mem)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AddArg(dst)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg(src)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
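	// Editorial note: the DUFFCOPY AuxInt selects the entry point into the
	// runtime's Duff's-device copy routine. The routine handles 64 blocks of
	// 16 bytes each; 14*(64-s/16) is the byte offset that skips the blocks
	// not needed for a copy of s bytes (assuming 14 bytes of code per block
	// in this build's duffcopy).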
true 44967 } 44968 // match: (Move [s] dst src mem) 44969 // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE 44970 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) 44971 for { 44972 s := v.AuxInt 44973 _ = v.Args[2] 44974 dst := v.Args[0] 44975 src := v.Args[1] 44976 mem := v.Args[2] 44977 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) { 44978 break 44979 } 44980 v.reset(OpMove) 44981 v.AuxInt = s - s%16 44982 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 44983 v0.AuxInt = s % 16 44984 v0.AddArg(dst) 44985 v.AddArg(v0) 44986 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 44987 v1.AuxInt = s % 16 44988 v1.AddArg(src) 44989 v.AddArg(v1) 44990 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 44991 v2.AddArg(dst) 44992 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 44993 v3.AddArg(src) 44994 v3.AddArg(mem) 44995 v2.AddArg(v3) 44996 v2.AddArg(mem) 44997 v.AddArg(v2) 44998 return true 44999 } 45000 // match: (Move [s] dst src mem) 45001 // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE 45002 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))) 45003 for { 45004 s := v.AuxInt 45005 _ = v.Args[2] 45006 dst := v.Args[0] 45007 src := v.Args[1] 45008 mem := v.Args[2] 45009 if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) { 45010 break 45011 } 45012 v.reset(OpMove) 45013 v.AuxInt = s - s%16 45014 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 45015 v0.AuxInt = s % 16 45016 v0.AddArg(dst) 45017 v.AddArg(v0) 45018 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 45019 v1.AuxInt = s % 16 45020 v1.AddArg(src) 45021 v.AddArg(v1) 45022 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 45023 v2.AuxInt = 8 45024 v2.AddArg(dst) 45025 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 45026 v3.AuxInt = 8 45027 v3.AddArg(src) 45028 v3.AddArg(mem) 45029 v2.AddArg(v3) 45030 v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 45031 v4.AddArg(dst) 45032 v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 45033 v5.AddArg(src) 45034 v5.AddArg(mem) 45035 v4.AddArg(v5) 45036 v4.AddArg(mem) 45037 v2.AddArg(v4) 45038 v.AddArg(v2) 45039 return true 45040 } 45041 // match: (Move [s] dst src mem) 45042 // cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice 45043 // result: (DUFFCOPY [14*(64-s/16)] dst src mem) 45044 for { 45045 s := v.AuxInt 45046 _ = v.Args[2] 45047 dst := v.Args[0] 45048 src := v.Args[1] 45049 mem := v.Args[2] 45050 if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) { 45051 break 45052 } 45053 v.reset(OpAMD64DUFFCOPY) 45054 v.AuxInt = 14 * (64 - s/16) 45055 v.AddArg(dst) 45056 v.AddArg(src) 45057 v.AddArg(mem) 45058 return true 45059 } 45060 // match: (Move [s] dst src mem) 45061 // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 45062 // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) 45063 for { 45064 s := v.AuxInt 45065 _ = v.Args[2] 45066 dst := v.Args[0] 45067 src := v.Args[1] 45068 mem := v.Args[2] 45069 if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) { 45070 break 45071 } 45072 v.reset(OpAMD64REPMOVSQ) 45073 v.AddArg(dst) 45074 v.AddArg(src) 45075 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 45076 v0.AuxInt = s / 8 45077 v.AddArg(v0) 45078 v.AddArg(mem) 45079 return true 45080 } 45081 return false 45082 } 45083 func rewriteValueAMD64_OpMul16_0(v *Value) bool { 45084 // match: (Mul16 x y) 45085 // 
cond: 45086 // result: (MULL x y) 45087 for { 45088 _ = v.Args[1] 45089 x := v.Args[0] 45090 y := v.Args[1] 45091 v.reset(OpAMD64MULL) 45092 v.AddArg(x) 45093 v.AddArg(y) 45094 return true 45095 } 45096 } 45097 func rewriteValueAMD64_OpMul32_0(v *Value) bool { 45098 // match: (Mul32 x y) 45099 // cond: 45100 // result: (MULL x y) 45101 for { 45102 _ = v.Args[1] 45103 x := v.Args[0] 45104 y := v.Args[1] 45105 v.reset(OpAMD64MULL) 45106 v.AddArg(x) 45107 v.AddArg(y) 45108 return true 45109 } 45110 } 45111 func rewriteValueAMD64_OpMul32F_0(v *Value) bool { 45112 // match: (Mul32F x y) 45113 // cond: 45114 // result: (MULSS x y) 45115 for { 45116 _ = v.Args[1] 45117 x := v.Args[0] 45118 y := v.Args[1] 45119 v.reset(OpAMD64MULSS) 45120 v.AddArg(x) 45121 v.AddArg(y) 45122 return true 45123 } 45124 } 45125 func rewriteValueAMD64_OpMul64_0(v *Value) bool { 45126 // match: (Mul64 x y) 45127 // cond: 45128 // result: (MULQ x y) 45129 for { 45130 _ = v.Args[1] 45131 x := v.Args[0] 45132 y := v.Args[1] 45133 v.reset(OpAMD64MULQ) 45134 v.AddArg(x) 45135 v.AddArg(y) 45136 return true 45137 } 45138 } 45139 func rewriteValueAMD64_OpMul64F_0(v *Value) bool { 45140 // match: (Mul64F x y) 45141 // cond: 45142 // result: (MULSD x y) 45143 for { 45144 _ = v.Args[1] 45145 x := v.Args[0] 45146 y := v.Args[1] 45147 v.reset(OpAMD64MULSD) 45148 v.AddArg(x) 45149 v.AddArg(y) 45150 return true 45151 } 45152 } 45153 func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool { 45154 // match: (Mul64uhilo x y) 45155 // cond: 45156 // result: (MULQU2 x y) 45157 for { 45158 _ = v.Args[1] 45159 x := v.Args[0] 45160 y := v.Args[1] 45161 v.reset(OpAMD64MULQU2) 45162 v.AddArg(x) 45163 v.AddArg(y) 45164 return true 45165 } 45166 } 45167 func rewriteValueAMD64_OpMul8_0(v *Value) bool { 45168 // match: (Mul8 x y) 45169 // cond: 45170 // result: (MULL x y) 45171 for { 45172 _ = v.Args[1] 45173 x := v.Args[0] 45174 y := v.Args[1] 45175 v.reset(OpAMD64MULL) 45176 v.AddArg(x) 45177 v.AddArg(y) 45178 return true 45179 } 45180 } 45181 func rewriteValueAMD64_OpNeg16_0(v *Value) bool { 45182 // match: (Neg16 x) 45183 // cond: 45184 // result: (NEGL x) 45185 for { 45186 x := v.Args[0] 45187 v.reset(OpAMD64NEGL) 45188 v.AddArg(x) 45189 return true 45190 } 45191 } 45192 func rewriteValueAMD64_OpNeg32_0(v *Value) bool { 45193 // match: (Neg32 x) 45194 // cond: 45195 // result: (NEGL x) 45196 for { 45197 x := v.Args[0] 45198 v.reset(OpAMD64NEGL) 45199 v.AddArg(x) 45200 return true 45201 } 45202 } 45203 func rewriteValueAMD64_OpNeg32F_0(v *Value) bool { 45204 b := v.Block 45205 _ = b 45206 typ := &b.Func.Config.Types 45207 _ = typ 45208 // match: (Neg32F x) 45209 // cond: 45210 // result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))])) 45211 for { 45212 x := v.Args[0] 45213 v.reset(OpAMD64PXOR) 45214 v.AddArg(x) 45215 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) 45216 v0.AuxInt = f2i(math.Copysign(0, -1)) 45217 v.AddArg(v0) 45218 return true 45219 } 45220 } 45221 func rewriteValueAMD64_OpNeg64_0(v *Value) bool { 45222 // match: (Neg64 x) 45223 // cond: 45224 // result: (NEGQ x) 45225 for { 45226 x := v.Args[0] 45227 v.reset(OpAMD64NEGQ) 45228 v.AddArg(x) 45229 return true 45230 } 45231 } 45232 func rewriteValueAMD64_OpNeg64F_0(v *Value) bool { 45233 b := v.Block 45234 _ = b 45235 typ := &b.Func.Config.Types 45236 _ = typ 45237 // match: (Neg64F x) 45238 // cond: 45239 // result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))])) 45240 for { 45241 x := v.Args[0] 45242 v.reset(OpAMD64PXOR) 45243 v.AddArg(x) 
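// Editorial sketch, not generated code: the Neg32F/Neg64F rules just above
// negate a float by XOR-ing its sign bit (PXOR with the constant
// f2i(math.Copysign(0, -1)), i.e. 1<<63 for float64) rather than computing
// 0-x, which would produce +0 for -(+0) and so get signed zero wrong. The
// same bit trick written stand-alone, with math.Float64bits standing in for
// the compiler-internal f2i helper:
//
//	func negate64(x float64) float64 {
//		const signMask uint64 = 1 << 63 // == f2i(math.Copysign(0, -1))
//		return math.Float64frombits(math.Float64bits(x) ^ signMask)
//	}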
45244 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) 45245 v0.AuxInt = f2i(math.Copysign(0, -1)) 45246 v.AddArg(v0) 45247 return true 45248 } 45249 } 45250 func rewriteValueAMD64_OpNeg8_0(v *Value) bool { 45251 // match: (Neg8 x) 45252 // cond: 45253 // result: (NEGL x) 45254 for { 45255 x := v.Args[0] 45256 v.reset(OpAMD64NEGL) 45257 v.AddArg(x) 45258 return true 45259 } 45260 } 45261 func rewriteValueAMD64_OpNeq16_0(v *Value) bool { 45262 b := v.Block 45263 _ = b 45264 // match: (Neq16 x y) 45265 // cond: 45266 // result: (SETNE (CMPW x y)) 45267 for { 45268 _ = v.Args[1] 45269 x := v.Args[0] 45270 y := v.Args[1] 45271 v.reset(OpAMD64SETNE) 45272 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 45273 v0.AddArg(x) 45274 v0.AddArg(y) 45275 v.AddArg(v0) 45276 return true 45277 } 45278 } 45279 func rewriteValueAMD64_OpNeq32_0(v *Value) bool { 45280 b := v.Block 45281 _ = b 45282 // match: (Neq32 x y) 45283 // cond: 45284 // result: (SETNE (CMPL x y)) 45285 for { 45286 _ = v.Args[1] 45287 x := v.Args[0] 45288 y := v.Args[1] 45289 v.reset(OpAMD64SETNE) 45290 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 45291 v0.AddArg(x) 45292 v0.AddArg(y) 45293 v.AddArg(v0) 45294 return true 45295 } 45296 } 45297 func rewriteValueAMD64_OpNeq32F_0(v *Value) bool { 45298 b := v.Block 45299 _ = b 45300 // match: (Neq32F x y) 45301 // cond: 45302 // result: (SETNEF (UCOMISS x y)) 45303 for { 45304 _ = v.Args[1] 45305 x := v.Args[0] 45306 y := v.Args[1] 45307 v.reset(OpAMD64SETNEF) 45308 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 45309 v0.AddArg(x) 45310 v0.AddArg(y) 45311 v.AddArg(v0) 45312 return true 45313 } 45314 } 45315 func rewriteValueAMD64_OpNeq64_0(v *Value) bool { 45316 b := v.Block 45317 _ = b 45318 // match: (Neq64 x y) 45319 // cond: 45320 // result: (SETNE (CMPQ x y)) 45321 for { 45322 _ = v.Args[1] 45323 x := v.Args[0] 45324 y := v.Args[1] 45325 v.reset(OpAMD64SETNE) 45326 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 45327 v0.AddArg(x) 45328 v0.AddArg(y) 45329 v.AddArg(v0) 45330 return true 45331 } 45332 } 45333 func rewriteValueAMD64_OpNeq64F_0(v *Value) bool { 45334 b := v.Block 45335 _ = b 45336 // match: (Neq64F x y) 45337 // cond: 45338 // result: (SETNEF (UCOMISD x y)) 45339 for { 45340 _ = v.Args[1] 45341 x := v.Args[0] 45342 y := v.Args[1] 45343 v.reset(OpAMD64SETNEF) 45344 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 45345 v0.AddArg(x) 45346 v0.AddArg(y) 45347 v.AddArg(v0) 45348 return true 45349 } 45350 } 45351 func rewriteValueAMD64_OpNeq8_0(v *Value) bool { 45352 b := v.Block 45353 _ = b 45354 // match: (Neq8 x y) 45355 // cond: 45356 // result: (SETNE (CMPB x y)) 45357 for { 45358 _ = v.Args[1] 45359 x := v.Args[0] 45360 y := v.Args[1] 45361 v.reset(OpAMD64SETNE) 45362 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 45363 v0.AddArg(x) 45364 v0.AddArg(y) 45365 v.AddArg(v0) 45366 return true 45367 } 45368 } 45369 func rewriteValueAMD64_OpNeqB_0(v *Value) bool { 45370 b := v.Block 45371 _ = b 45372 // match: (NeqB x y) 45373 // cond: 45374 // result: (SETNE (CMPB x y)) 45375 for { 45376 _ = v.Args[1] 45377 x := v.Args[0] 45378 y := v.Args[1] 45379 v.reset(OpAMD64SETNE) 45380 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 45381 v0.AddArg(x) 45382 v0.AddArg(y) 45383 v.AddArg(v0) 45384 return true 45385 } 45386 } 45387 func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool { 45388 b := v.Block 45389 _ = b 45390 config := b.Func.Config 45391 _ = config 45392 // match: (NeqPtr x y) 45393 // cond: config.PtrSize == 8 45394 
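// Editorial note: the Neq16/Neq32/Neq64/Neq8/NeqB rules above all share one
// shape, compare at the matching width and materialize the flag bit as
// (SETNE (CMPx x y)); the float variants use SETNEF over UCOMISS/UCOMISD so
// that unordered (NaN) comparisons still count as not-equal. The NeqPtr rule
// continuing below is the only size-dependent case. A hypothetical helper
// (illustrative name, not compiler API) mirroring its config.PtrSize dispatch:
//
//	func neqPtrCmpOp(ptrSize int64) Op {
//		if ptrSize == 8 {
//			return OpAMD64CMPQ // amd64
//		}
//		return OpAMD64CMPL // amd64p32, 4-byte pointers
//	}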
// result: (SETNE (CMPQ x y)) 45395 for { 45396 _ = v.Args[1] 45397 x := v.Args[0] 45398 y := v.Args[1] 45399 if !(config.PtrSize == 8) { 45400 break 45401 } 45402 v.reset(OpAMD64SETNE) 45403 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 45404 v0.AddArg(x) 45405 v0.AddArg(y) 45406 v.AddArg(v0) 45407 return true 45408 } 45409 // match: (NeqPtr x y) 45410 // cond: config.PtrSize == 4 45411 // result: (SETNE (CMPL x y)) 45412 for { 45413 _ = v.Args[1] 45414 x := v.Args[0] 45415 y := v.Args[1] 45416 if !(config.PtrSize == 4) { 45417 break 45418 } 45419 v.reset(OpAMD64SETNE) 45420 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 45421 v0.AddArg(x) 45422 v0.AddArg(y) 45423 v.AddArg(v0) 45424 return true 45425 } 45426 return false 45427 } 45428 func rewriteValueAMD64_OpNilCheck_0(v *Value) bool { 45429 // match: (NilCheck ptr mem) 45430 // cond: 45431 // result: (LoweredNilCheck ptr mem) 45432 for { 45433 _ = v.Args[1] 45434 ptr := v.Args[0] 45435 mem := v.Args[1] 45436 v.reset(OpAMD64LoweredNilCheck) 45437 v.AddArg(ptr) 45438 v.AddArg(mem) 45439 return true 45440 } 45441 } 45442 func rewriteValueAMD64_OpNot_0(v *Value) bool { 45443 // match: (Not x) 45444 // cond: 45445 // result: (XORLconst [1] x) 45446 for { 45447 x := v.Args[0] 45448 v.reset(OpAMD64XORLconst) 45449 v.AuxInt = 1 45450 v.AddArg(x) 45451 return true 45452 } 45453 } 45454 func rewriteValueAMD64_OpOffPtr_0(v *Value) bool { 45455 b := v.Block 45456 _ = b 45457 config := b.Func.Config 45458 _ = config 45459 typ := &b.Func.Config.Types 45460 _ = typ 45461 // match: (OffPtr [off] ptr) 45462 // cond: config.PtrSize == 8 && is32Bit(off) 45463 // result: (ADDQconst [off] ptr) 45464 for { 45465 off := v.AuxInt 45466 ptr := v.Args[0] 45467 if !(config.PtrSize == 8 && is32Bit(off)) { 45468 break 45469 } 45470 v.reset(OpAMD64ADDQconst) 45471 v.AuxInt = off 45472 v.AddArg(ptr) 45473 return true 45474 } 45475 // match: (OffPtr [off] ptr) 45476 // cond: config.PtrSize == 8 45477 // result: (ADDQ (MOVQconst [off]) ptr) 45478 for { 45479 off := v.AuxInt 45480 ptr := v.Args[0] 45481 if !(config.PtrSize == 8) { 45482 break 45483 } 45484 v.reset(OpAMD64ADDQ) 45485 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 45486 v0.AuxInt = off 45487 v.AddArg(v0) 45488 v.AddArg(ptr) 45489 return true 45490 } 45491 // match: (OffPtr [off] ptr) 45492 // cond: config.PtrSize == 4 45493 // result: (ADDLconst [off] ptr) 45494 for { 45495 off := v.AuxInt 45496 ptr := v.Args[0] 45497 if !(config.PtrSize == 4) { 45498 break 45499 } 45500 v.reset(OpAMD64ADDLconst) 45501 v.AuxInt = off 45502 v.AddArg(ptr) 45503 return true 45504 } 45505 return false 45506 } 45507 func rewriteValueAMD64_OpOr16_0(v *Value) bool { 45508 // match: (Or16 x y) 45509 // cond: 45510 // result: (ORL x y) 45511 for { 45512 _ = v.Args[1] 45513 x := v.Args[0] 45514 y := v.Args[1] 45515 v.reset(OpAMD64ORL) 45516 v.AddArg(x) 45517 v.AddArg(y) 45518 return true 45519 } 45520 } 45521 func rewriteValueAMD64_OpOr32_0(v *Value) bool { 45522 // match: (Or32 x y) 45523 // cond: 45524 // result: (ORL x y) 45525 for { 45526 _ = v.Args[1] 45527 x := v.Args[0] 45528 y := v.Args[1] 45529 v.reset(OpAMD64ORL) 45530 v.AddArg(x) 45531 v.AddArg(y) 45532 return true 45533 } 45534 } 45535 func rewriteValueAMD64_OpOr64_0(v *Value) bool { 45536 // match: (Or64 x y) 45537 // cond: 45538 // result: (ORQ x y) 45539 for { 45540 _ = v.Args[1] 45541 x := v.Args[0] 45542 y := v.Args[1] 45543 v.reset(OpAMD64ORQ) 45544 v.AddArg(x) 45545 v.AddArg(y) 45546 return true 45547 } 45548 } 45549 func 
rewriteValueAMD64_OpOr8_0(v *Value) bool { 45550 // match: (Or8 x y) 45551 // cond: 45552 // result: (ORL x y) 45553 for { 45554 _ = v.Args[1] 45555 x := v.Args[0] 45556 y := v.Args[1] 45557 v.reset(OpAMD64ORL) 45558 v.AddArg(x) 45559 v.AddArg(y) 45560 return true 45561 } 45562 } 45563 func rewriteValueAMD64_OpOrB_0(v *Value) bool { 45564 // match: (OrB x y) 45565 // cond: 45566 // result: (ORL x y) 45567 for { 45568 _ = v.Args[1] 45569 x := v.Args[0] 45570 y := v.Args[1] 45571 v.reset(OpAMD64ORL) 45572 v.AddArg(x) 45573 v.AddArg(y) 45574 return true 45575 } 45576 } 45577 func rewriteValueAMD64_OpPopCount16_0(v *Value) bool { 45578 b := v.Block 45579 _ = b 45580 typ := &b.Func.Config.Types 45581 _ = typ 45582 // match: (PopCount16 x) 45583 // cond: 45584 // result: (POPCNTL (MOVWQZX <typ.UInt32> x)) 45585 for { 45586 x := v.Args[0] 45587 v.reset(OpAMD64POPCNTL) 45588 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) 45589 v0.AddArg(x) 45590 v.AddArg(v0) 45591 return true 45592 } 45593 } 45594 func rewriteValueAMD64_OpPopCount32_0(v *Value) bool { 45595 // match: (PopCount32 x) 45596 // cond: 45597 // result: (POPCNTL x) 45598 for { 45599 x := v.Args[0] 45600 v.reset(OpAMD64POPCNTL) 45601 v.AddArg(x) 45602 return true 45603 } 45604 } 45605 func rewriteValueAMD64_OpPopCount64_0(v *Value) bool { 45606 // match: (PopCount64 x) 45607 // cond: 45608 // result: (POPCNTQ x) 45609 for { 45610 x := v.Args[0] 45611 v.reset(OpAMD64POPCNTQ) 45612 v.AddArg(x) 45613 return true 45614 } 45615 } 45616 func rewriteValueAMD64_OpPopCount8_0(v *Value) bool { 45617 b := v.Block 45618 _ = b 45619 typ := &b.Func.Config.Types 45620 _ = typ 45621 // match: (PopCount8 x) 45622 // cond: 45623 // result: (POPCNTL (MOVBQZX <typ.UInt32> x)) 45624 for { 45625 x := v.Args[0] 45626 v.reset(OpAMD64POPCNTL) 45627 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) 45628 v0.AddArg(x) 45629 v.AddArg(v0) 45630 return true 45631 } 45632 } 45633 func rewriteValueAMD64_OpRound32F_0(v *Value) bool { 45634 // match: (Round32F x) 45635 // cond: 45636 // result: x 45637 for { 45638 x := v.Args[0] 45639 v.reset(OpCopy) 45640 v.Type = x.Type 45641 v.AddArg(x) 45642 return true 45643 } 45644 } 45645 func rewriteValueAMD64_OpRound64F_0(v *Value) bool { 45646 // match: (Round64F x) 45647 // cond: 45648 // result: x 45649 for { 45650 x := v.Args[0] 45651 v.reset(OpCopy) 45652 v.Type = x.Type 45653 v.AddArg(x) 45654 return true 45655 } 45656 } 45657 func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool { 45658 // match: (RoundToEven x) 45659 // cond: 45660 // result: (ROUNDSD [0] x) 45661 for { 45662 x := v.Args[0] 45663 v.reset(OpAMD64ROUNDSD) 45664 v.AuxInt = 0 45665 v.AddArg(x) 45666 return true 45667 } 45668 } 45669 func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool { 45670 b := v.Block 45671 _ = b 45672 // match: (Rsh16Ux16 <t> x y) 45673 // cond: 45674 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) 45675 for { 45676 t := v.Type 45677 _ = v.Args[1] 45678 x := v.Args[0] 45679 y := v.Args[1] 45680 v.reset(OpAMD64ANDL) 45681 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 45682 v0.AddArg(x) 45683 v0.AddArg(y) 45684 v.AddArg(v0) 45685 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45686 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 45687 v2.AuxInt = 16 45688 v2.AddArg(y) 45689 v1.AddArg(v2) 45690 v.AddArg(v1) 45691 return true 45692 } 45693 } 45694 func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool { 45695 b := v.Block 45696 _ = b 45697 // match: (Rsh16Ux32 <t> x y) 45698 // cond: 45699 // result: 
(ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) 45700 for { 45701 t := v.Type 45702 _ = v.Args[1] 45703 x := v.Args[0] 45704 y := v.Args[1] 45705 v.reset(OpAMD64ANDL) 45706 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 45707 v0.AddArg(x) 45708 v0.AddArg(y) 45709 v.AddArg(v0) 45710 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45711 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 45712 v2.AuxInt = 16 45713 v2.AddArg(y) 45714 v1.AddArg(v2) 45715 v.AddArg(v1) 45716 return true 45717 } 45718 } 45719 func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool { 45720 b := v.Block 45721 _ = b 45722 // match: (Rsh16Ux64 <t> x y) 45723 // cond: 45724 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) 45725 for { 45726 t := v.Type 45727 _ = v.Args[1] 45728 x := v.Args[0] 45729 y := v.Args[1] 45730 v.reset(OpAMD64ANDL) 45731 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 45732 v0.AddArg(x) 45733 v0.AddArg(y) 45734 v.AddArg(v0) 45735 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45736 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 45737 v2.AuxInt = 16 45738 v2.AddArg(y) 45739 v1.AddArg(v2) 45740 v.AddArg(v1) 45741 return true 45742 } 45743 } 45744 func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool { 45745 b := v.Block 45746 _ = b 45747 // match: (Rsh16Ux8 <t> x y) 45748 // cond: 45749 // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) 45750 for { 45751 t := v.Type 45752 _ = v.Args[1] 45753 x := v.Args[0] 45754 y := v.Args[1] 45755 v.reset(OpAMD64ANDL) 45756 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t) 45757 v0.AddArg(x) 45758 v0.AddArg(y) 45759 v.AddArg(v0) 45760 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45761 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 45762 v2.AuxInt = 16 45763 v2.AddArg(y) 45764 v1.AddArg(v2) 45765 v.AddArg(v1) 45766 return true 45767 } 45768 } 45769 func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool { 45770 b := v.Block 45771 _ = b 45772 // match: (Rsh16x16 <t> x y) 45773 // cond: 45774 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) 45775 for { 45776 t := v.Type 45777 _ = v.Args[1] 45778 x := v.Args[0] 45779 y := v.Args[1] 45780 v.reset(OpAMD64SARW) 45781 v.Type = t 45782 v.AddArg(x) 45783 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 45784 v0.AddArg(y) 45785 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 45786 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 45787 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 45788 v3.AuxInt = 16 45789 v3.AddArg(y) 45790 v2.AddArg(v3) 45791 v1.AddArg(v2) 45792 v0.AddArg(v1) 45793 v.AddArg(v0) 45794 return true 45795 } 45796 } 45797 func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool { 45798 b := v.Block 45799 _ = b 45800 // match: (Rsh16x32 <t> x y) 45801 // cond: 45802 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) 45803 for { 45804 t := v.Type 45805 _ = v.Args[1] 45806 x := v.Args[0] 45807 y := v.Args[1] 45808 v.reset(OpAMD64SARW) 45809 v.Type = t 45810 v.AddArg(x) 45811 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 45812 v0.AddArg(y) 45813 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 45814 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 45815 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 45816 v3.AuxInt = 16 45817 v3.AddArg(y) 45818 v2.AddArg(v3) 45819 v1.AddArg(v2) 45820 v0.AddArg(v1) 45821 v.AddArg(v0) 45822 return true 45823 } 45824 } 45825 func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool { 45826 b := 
v.Block 45827 _ = b 45828 // match: (Rsh16x64 <t> x y) 45829 // cond: 45830 // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) 45831 for { 45832 t := v.Type 45833 _ = v.Args[1] 45834 x := v.Args[0] 45835 y := v.Args[1] 45836 v.reset(OpAMD64SARW) 45837 v.Type = t 45838 v.AddArg(x) 45839 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 45840 v0.AddArg(y) 45841 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 45842 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 45843 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 45844 v3.AuxInt = 16 45845 v3.AddArg(y) 45846 v2.AddArg(v3) 45847 v1.AddArg(v2) 45848 v0.AddArg(v1) 45849 v.AddArg(v0) 45850 return true 45851 } 45852 } 45853 func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool { 45854 b := v.Block 45855 _ = b 45856 // match: (Rsh16x8 <t> x y) 45857 // cond: 45858 // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) 45859 for { 45860 t := v.Type 45861 _ = v.Args[1] 45862 x := v.Args[0] 45863 y := v.Args[1] 45864 v.reset(OpAMD64SARW) 45865 v.Type = t 45866 v.AddArg(x) 45867 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 45868 v0.AddArg(y) 45869 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 45870 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 45871 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 45872 v3.AuxInt = 16 45873 v3.AddArg(y) 45874 v2.AddArg(v3) 45875 v1.AddArg(v2) 45876 v0.AddArg(v1) 45877 v.AddArg(v0) 45878 return true 45879 } 45880 } 45881 func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool { 45882 b := v.Block 45883 _ = b 45884 // match: (Rsh32Ux16 <t> x y) 45885 // cond: 45886 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 45887 for { 45888 t := v.Type 45889 _ = v.Args[1] 45890 x := v.Args[0] 45891 y := v.Args[1] 45892 v.reset(OpAMD64ANDL) 45893 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 45894 v0.AddArg(x) 45895 v0.AddArg(y) 45896 v.AddArg(v0) 45897 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45898 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 45899 v2.AuxInt = 32 45900 v2.AddArg(y) 45901 v1.AddArg(v2) 45902 v.AddArg(v1) 45903 return true 45904 } 45905 } 45906 func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool { 45907 b := v.Block 45908 _ = b 45909 // match: (Rsh32Ux32 <t> x y) 45910 // cond: 45911 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 45912 for { 45913 t := v.Type 45914 _ = v.Args[1] 45915 x := v.Args[0] 45916 y := v.Args[1] 45917 v.reset(OpAMD64ANDL) 45918 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 45919 v0.AddArg(x) 45920 v0.AddArg(y) 45921 v.AddArg(v0) 45922 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45923 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 45924 v2.AuxInt = 32 45925 v2.AddArg(y) 45926 v1.AddArg(v2) 45927 v.AddArg(v1) 45928 return true 45929 } 45930 } 45931 func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool { 45932 b := v.Block 45933 _ = b 45934 // match: (Rsh32Ux64 <t> x y) 45935 // cond: 45936 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 45937 for { 45938 t := v.Type 45939 _ = v.Args[1] 45940 x := v.Args[0] 45941 y := v.Args[1] 45942 v.reset(OpAMD64ANDL) 45943 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 45944 v0.AddArg(x) 45945 v0.AddArg(y) 45946 v.AddArg(v0) 45947 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45948 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 45949 v2.AuxInt = 32 45950 v2.AddArg(y) 45951 v1.AddArg(v2) 45952 v.AddArg(v1) 45953 return true 
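// Editorial sketch: each unsigned Rsh*Ux* rule in this run guards a hardware
// shift, which only consults the low bits of the count, with an SBB carry
// mask so that counts >= the operand width yield 0, as the Go spec requires.
// The 32-bit case written out by hand (an illustration, not the rule itself):
//
//	func rsh32Ux32(x, y uint32) uint32 {
//		shifted := x >> (y & 31) // SHRL: hardware uses only the low 5 bits
//		var mask uint32          // SBBLcarrymask (CMPLconst y [32]):
//		if y < 32 {              // all ones when the compare sets carry
//			mask = ^uint32(0)
//		}
//		return shifted & mask // ANDL: out-of-range shift counts give 0
//	}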
45954 } 45955 } 45956 func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool { 45957 b := v.Block 45958 _ = b 45959 // match: (Rsh32Ux8 <t> x y) 45960 // cond: 45961 // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 45962 for { 45963 t := v.Type 45964 _ = v.Args[1] 45965 x := v.Args[0] 45966 y := v.Args[1] 45967 v.reset(OpAMD64ANDL) 45968 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t) 45969 v0.AddArg(x) 45970 v0.AddArg(y) 45971 v.AddArg(v0) 45972 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 45973 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 45974 v2.AuxInt = 32 45975 v2.AddArg(y) 45976 v1.AddArg(v2) 45977 v.AddArg(v1) 45978 return true 45979 } 45980 } 45981 func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool { 45982 b := v.Block 45983 _ = b 45984 // match: (Rsh32x16 <t> x y) 45985 // cond: 45986 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) 45987 for { 45988 t := v.Type 45989 _ = v.Args[1] 45990 x := v.Args[0] 45991 y := v.Args[1] 45992 v.reset(OpAMD64SARL) 45993 v.Type = t 45994 v.AddArg(x) 45995 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 45996 v0.AddArg(y) 45997 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 45998 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 45999 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 46000 v3.AuxInt = 32 46001 v3.AddArg(y) 46002 v2.AddArg(v3) 46003 v1.AddArg(v2) 46004 v0.AddArg(v1) 46005 v.AddArg(v0) 46006 return true 46007 } 46008 } 46009 func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool { 46010 b := v.Block 46011 _ = b 46012 // match: (Rsh32x32 <t> x y) 46013 // cond: 46014 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) 46015 for { 46016 t := v.Type 46017 _ = v.Args[1] 46018 x := v.Args[0] 46019 y := v.Args[1] 46020 v.reset(OpAMD64SARL) 46021 v.Type = t 46022 v.AddArg(x) 46023 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 46024 v0.AddArg(y) 46025 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 46026 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 46027 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 46028 v3.AuxInt = 32 46029 v3.AddArg(y) 46030 v2.AddArg(v3) 46031 v1.AddArg(v2) 46032 v0.AddArg(v1) 46033 v.AddArg(v0) 46034 return true 46035 } 46036 } 46037 func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool { 46038 b := v.Block 46039 _ = b 46040 // match: (Rsh32x64 <t> x y) 46041 // cond: 46042 // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) 46043 for { 46044 t := v.Type 46045 _ = v.Args[1] 46046 x := v.Args[0] 46047 y := v.Args[1] 46048 v.reset(OpAMD64SARL) 46049 v.Type = t 46050 v.AddArg(x) 46051 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 46052 v0.AddArg(y) 46053 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 46054 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 46055 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 46056 v3.AuxInt = 32 46057 v3.AddArg(y) 46058 v2.AddArg(v3) 46059 v1.AddArg(v2) 46060 v0.AddArg(v1) 46061 v.AddArg(v0) 46062 return true 46063 } 46064 } 46065 func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool { 46066 b := v.Block 46067 _ = b 46068 // match: (Rsh32x8 <t> x y) 46069 // cond: 46070 // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) 46071 for { 46072 t := v.Type 46073 _ = v.Args[1] 46074 x := v.Args[0] 46075 y := v.Args[1] 46076 v.reset(OpAMD64SARL) 46077 v.Type = t 46078 v.AddArg(x) 46079 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 
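// Editorial sketch: the signed Rsh*x* rules here cannot mask the result to
// zero, because an arithmetic shift by >= width must fill with the sign bit.
// Instead they rewrite the count: ORing y with NOTL of the carry mask leaves
// y untouched when y < width and saturates it to all ones otherwise, which
// SARL then truncates to width-1. For the 32-bit case (illustration only):
//
//	func rsh32x32(x int32, y uint32) int32 {
//		if y >= 32 { // NOTL (SBBLcarrymask ...) is all ones here, so the
//			y = 31 // ORL saturates the count and SARL clamps it to 31
//		}
//		return x >> y // shifting by 31 replicates the sign bit
//	}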
46080 v0.AddArg(y) 46081 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 46082 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 46083 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 46084 v3.AuxInt = 32 46085 v3.AddArg(y) 46086 v2.AddArg(v3) 46087 v1.AddArg(v2) 46088 v0.AddArg(v1) 46089 v.AddArg(v0) 46090 return true 46091 } 46092 } 46093 func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool { 46094 b := v.Block 46095 _ = b 46096 // match: (Rsh64Ux16 <t> x y) 46097 // cond: 46098 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 46099 for { 46100 t := v.Type 46101 _ = v.Args[1] 46102 x := v.Args[0] 46103 y := v.Args[1] 46104 v.reset(OpAMD64ANDQ) 46105 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 46106 v0.AddArg(x) 46107 v0.AddArg(y) 46108 v.AddArg(v0) 46109 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 46110 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 46111 v2.AuxInt = 64 46112 v2.AddArg(y) 46113 v1.AddArg(v2) 46114 v.AddArg(v1) 46115 return true 46116 } 46117 } 46118 func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool { 46119 b := v.Block 46120 _ = b 46121 // match: (Rsh64Ux32 <t> x y) 46122 // cond: 46123 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 46124 for { 46125 t := v.Type 46126 _ = v.Args[1] 46127 x := v.Args[0] 46128 y := v.Args[1] 46129 v.reset(OpAMD64ANDQ) 46130 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 46131 v0.AddArg(x) 46132 v0.AddArg(y) 46133 v.AddArg(v0) 46134 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 46135 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 46136 v2.AuxInt = 64 46137 v2.AddArg(y) 46138 v1.AddArg(v2) 46139 v.AddArg(v1) 46140 return true 46141 } 46142 } 46143 func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool { 46144 b := v.Block 46145 _ = b 46146 // match: (Rsh64Ux64 <t> x y) 46147 // cond: 46148 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 46149 for { 46150 t := v.Type 46151 _ = v.Args[1] 46152 x := v.Args[0] 46153 y := v.Args[1] 46154 v.reset(OpAMD64ANDQ) 46155 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 46156 v0.AddArg(x) 46157 v0.AddArg(y) 46158 v.AddArg(v0) 46159 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 46160 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 46161 v2.AuxInt = 64 46162 v2.AddArg(y) 46163 v1.AddArg(v2) 46164 v.AddArg(v1) 46165 return true 46166 } 46167 } 46168 func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool { 46169 b := v.Block 46170 _ = b 46171 // match: (Rsh64Ux8 <t> x y) 46172 // cond: 46173 // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 46174 for { 46175 t := v.Type 46176 _ = v.Args[1] 46177 x := v.Args[0] 46178 y := v.Args[1] 46179 v.reset(OpAMD64ANDQ) 46180 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t) 46181 v0.AddArg(x) 46182 v0.AddArg(y) 46183 v.AddArg(v0) 46184 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 46185 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 46186 v2.AuxInt = 64 46187 v2.AddArg(y) 46188 v1.AddArg(v2) 46189 v.AddArg(v1) 46190 return true 46191 } 46192 } 46193 func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool { 46194 b := v.Block 46195 _ = b 46196 // match: (Rsh64x16 <t> x y) 46197 // cond: 46198 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) 46199 for { 46200 t := v.Type 46201 _ = v.Args[1] 46202 x := v.Args[0] 46203 y := v.Args[1] 46204 v.reset(OpAMD64SARQ) 46205 v.Type = t 46206 v.AddArg(x) 46207 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 46208 v0.AddArg(y) 46209 v1 := 
b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 46210 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 46211 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 46212 v3.AuxInt = 64 46213 v3.AddArg(y) 46214 v2.AddArg(v3) 46215 v1.AddArg(v2) 46216 v0.AddArg(v1) 46217 v.AddArg(v0) 46218 return true 46219 } 46220 } 46221 func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool { 46222 b := v.Block 46223 _ = b 46224 // match: (Rsh64x32 <t> x y) 46225 // cond: 46226 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) 46227 for { 46228 t := v.Type 46229 _ = v.Args[1] 46230 x := v.Args[0] 46231 y := v.Args[1] 46232 v.reset(OpAMD64SARQ) 46233 v.Type = t 46234 v.AddArg(x) 46235 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 46236 v0.AddArg(y) 46237 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 46238 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 46239 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 46240 v3.AuxInt = 64 46241 v3.AddArg(y) 46242 v2.AddArg(v3) 46243 v1.AddArg(v2) 46244 v0.AddArg(v1) 46245 v.AddArg(v0) 46246 return true 46247 } 46248 } 46249 func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool { 46250 b := v.Block 46251 _ = b 46252 // match: (Rsh64x64 <t> x y) 46253 // cond: 46254 // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) 46255 for { 46256 t := v.Type 46257 _ = v.Args[1] 46258 x := v.Args[0] 46259 y := v.Args[1] 46260 v.reset(OpAMD64SARQ) 46261 v.Type = t 46262 v.AddArg(x) 46263 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 46264 v0.AddArg(y) 46265 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 46266 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 46267 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 46268 v3.AuxInt = 64 46269 v3.AddArg(y) 46270 v2.AddArg(v3) 46271 v1.AddArg(v2) 46272 v0.AddArg(v1) 46273 v.AddArg(v0) 46274 return true 46275 } 46276 } 46277 func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool { 46278 b := v.Block 46279 _ = b 46280 // match: (Rsh64x8 <t> x y) 46281 // cond: 46282 // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) 46283 for { 46284 t := v.Type 46285 _ = v.Args[1] 46286 x := v.Args[0] 46287 y := v.Args[1] 46288 v.reset(OpAMD64SARQ) 46289 v.Type = t 46290 v.AddArg(x) 46291 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 46292 v0.AddArg(y) 46293 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 46294 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 46295 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 46296 v3.AuxInt = 64 46297 v3.AddArg(y) 46298 v2.AddArg(v3) 46299 v1.AddArg(v2) 46300 v0.AddArg(v1) 46301 v.AddArg(v0) 46302 return true 46303 } 46304 } 46305 func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool { 46306 b := v.Block 46307 _ = b 46308 // match: (Rsh8Ux16 <t> x y) 46309 // cond: 46310 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) 46311 for { 46312 t := v.Type 46313 _ = v.Args[1] 46314 x := v.Args[0] 46315 y := v.Args[1] 46316 v.reset(OpAMD64ANDL) 46317 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 46318 v0.AddArg(x) 46319 v0.AddArg(y) 46320 v.AddArg(v0) 46321 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 46322 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 46323 v2.AuxInt = 8 46324 v2.AddArg(y) 46325 v1.AddArg(v2) 46326 v.AddArg(v1) 46327 return true 46328 } 46329 } 46330 func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool { 46331 b := v.Block 46332 _ = b 46333 // match: (Rsh8Ux32 <t> x y) 46334 // cond: 46335 // 
result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) 46336 for { 46337 t := v.Type 46338 _ = v.Args[1] 46339 x := v.Args[0] 46340 y := v.Args[1] 46341 v.reset(OpAMD64ANDL) 46342 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 46343 v0.AddArg(x) 46344 v0.AddArg(y) 46345 v.AddArg(v0) 46346 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 46347 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 46348 v2.AuxInt = 8 46349 v2.AddArg(y) 46350 v1.AddArg(v2) 46351 v.AddArg(v1) 46352 return true 46353 } 46354 } 46355 func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool { 46356 b := v.Block 46357 _ = b 46358 // match: (Rsh8Ux64 <t> x y) 46359 // cond: 46360 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) 46361 for { 46362 t := v.Type 46363 _ = v.Args[1] 46364 x := v.Args[0] 46365 y := v.Args[1] 46366 v.reset(OpAMD64ANDL) 46367 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 46368 v0.AddArg(x) 46369 v0.AddArg(y) 46370 v.AddArg(v0) 46371 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 46372 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 46373 v2.AuxInt = 8 46374 v2.AddArg(y) 46375 v1.AddArg(v2) 46376 v.AddArg(v1) 46377 return true 46378 } 46379 } 46380 func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool { 46381 b := v.Block 46382 _ = b 46383 // match: (Rsh8Ux8 <t> x y) 46384 // cond: 46385 // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) 46386 for { 46387 t := v.Type 46388 _ = v.Args[1] 46389 x := v.Args[0] 46390 y := v.Args[1] 46391 v.reset(OpAMD64ANDL) 46392 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t) 46393 v0.AddArg(x) 46394 v0.AddArg(y) 46395 v.AddArg(v0) 46396 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 46397 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 46398 v2.AuxInt = 8 46399 v2.AddArg(y) 46400 v1.AddArg(v2) 46401 v.AddArg(v1) 46402 return true 46403 } 46404 } 46405 func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool { 46406 b := v.Block 46407 _ = b 46408 // match: (Rsh8x16 <t> x y) 46409 // cond: 46410 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) 46411 for { 46412 t := v.Type 46413 _ = v.Args[1] 46414 x := v.Args[0] 46415 y := v.Args[1] 46416 v.reset(OpAMD64SARB) 46417 v.Type = t 46418 v.AddArg(x) 46419 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 46420 v0.AddArg(y) 46421 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 46422 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 46423 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 46424 v3.AuxInt = 8 46425 v3.AddArg(y) 46426 v2.AddArg(v3) 46427 v1.AddArg(v2) 46428 v0.AddArg(v1) 46429 v.AddArg(v0) 46430 return true 46431 } 46432 } 46433 func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool { 46434 b := v.Block 46435 _ = b 46436 // match: (Rsh8x32 <t> x y) 46437 // cond: 46438 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) 46439 for { 46440 t := v.Type 46441 _ = v.Args[1] 46442 x := v.Args[0] 46443 y := v.Args[1] 46444 v.reset(OpAMD64SARB) 46445 v.Type = t 46446 v.AddArg(x) 46447 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 46448 v0.AddArg(y) 46449 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 46450 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 46451 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 46452 v3.AuxInt = 8 46453 v3.AddArg(y) 46454 v2.AddArg(v3) 46455 v1.AddArg(v2) 46456 v0.AddArg(v1) 46457 v.AddArg(v0) 46458 return true 46459 } 46460 } 46461 func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool { 46462 b := v.Block 46463 _ 
= b 46464 // match: (Rsh8x64 <t> x y) 46465 // cond: 46466 // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) 46467 for { 46468 t := v.Type 46469 _ = v.Args[1] 46470 x := v.Args[0] 46471 y := v.Args[1] 46472 v.reset(OpAMD64SARB) 46473 v.Type = t 46474 v.AddArg(x) 46475 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type) 46476 v0.AddArg(y) 46477 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type) 46478 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type) 46479 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 46480 v3.AuxInt = 8 46481 v3.AddArg(y) 46482 v2.AddArg(v3) 46483 v1.AddArg(v2) 46484 v0.AddArg(v1) 46485 v.AddArg(v0) 46486 return true 46487 } 46488 } 46489 func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { 46490 b := v.Block 46491 _ = b 46492 // match: (Rsh8x8 <t> x y) 46493 // cond: 46494 // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) 46495 for { 46496 t := v.Type 46497 _ = v.Args[1] 46498 x := v.Args[0] 46499 y := v.Args[1] 46500 v.reset(OpAMD64SARB) 46501 v.Type = t 46502 v.AddArg(x) 46503 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type) 46504 v0.AddArg(y) 46505 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type) 46506 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type) 46507 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 46508 v3.AuxInt = 8 46509 v3.AddArg(y) 46510 v2.AddArg(v3) 46511 v1.AddArg(v2) 46512 v0.AddArg(v1) 46513 v.AddArg(v0) 46514 return true 46515 } 46516 } 46517 func rewriteValueAMD64_OpSelect0_0(v *Value) bool { 46518 b := v.Block 46519 _ = b 46520 // match: (Select0 <t> (AddTupleFirst32 val tuple)) 46521 // cond: 46522 // result: (ADDL val (Select0 <t> tuple)) 46523 for { 46524 t := v.Type 46525 v_0 := v.Args[0] 46526 if v_0.Op != OpAMD64AddTupleFirst32 { 46527 break 46528 } 46529 _ = v_0.Args[1] 46530 val := v_0.Args[0] 46531 tuple := v_0.Args[1] 46532 v.reset(OpAMD64ADDL) 46533 v.AddArg(val) 46534 v0 := b.NewValue0(v.Pos, OpSelect0, t) 46535 v0.AddArg(tuple) 46536 v.AddArg(v0) 46537 return true 46538 } 46539 // match: (Select0 <t> (AddTupleFirst64 val tuple)) 46540 // cond: 46541 // result: (ADDQ val (Select0 <t> tuple)) 46542 for { 46543 t := v.Type 46544 v_0 := v.Args[0] 46545 if v_0.Op != OpAMD64AddTupleFirst64 { 46546 break 46547 } 46548 _ = v_0.Args[1] 46549 val := v_0.Args[0] 46550 tuple := v_0.Args[1] 46551 v.reset(OpAMD64ADDQ) 46552 v.AddArg(val) 46553 v0 := b.NewValue0(v.Pos, OpSelect0, t) 46554 v0.AddArg(tuple) 46555 v.AddArg(v0) 46556 return true 46557 } 46558 return false 46559 } 46560 func rewriteValueAMD64_OpSelect1_0(v *Value) bool { 46561 // match: (Select1 (AddTupleFirst32 _ tuple)) 46562 // cond: 46563 // result: (Select1 tuple) 46564 for { 46565 v_0 := v.Args[0] 46566 if v_0.Op != OpAMD64AddTupleFirst32 { 46567 break 46568 } 46569 _ = v_0.Args[1] 46570 tuple := v_0.Args[1] 46571 v.reset(OpSelect1) 46572 v.AddArg(tuple) 46573 return true 46574 } 46575 // match: (Select1 (AddTupleFirst64 _ tuple)) 46576 // cond: 46577 // result: (Select1 tuple) 46578 for { 46579 v_0 := v.Args[0] 46580 if v_0.Op != OpAMD64AddTupleFirst64 { 46581 break 46582 } 46583 _ = v_0.Args[1] 46584 tuple := v_0.Args[1] 46585 v.reset(OpSelect1) 46586 v.AddArg(tuple) 46587 return true 46588 } 46589 return false 46590 } 46591 func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool { 46592 // match: (SignExt16to32 x) 46593 // cond: 46594 // result: (MOVWQSX x) 46595 for { 46596 x := v.Args[0] 46597 v.reset(OpAMD64MOVWQSX) 46598 v.AddArg(x) 46599 return true 46600 } 46601 } 
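// signExtSketch is an editorial, hand-written sketch, not generated code:
// the SignExt rules above and below all lower to a single sign-extending
// move chosen by the source width (MOVBQSX/MOVWQSX/MOVLQSX), regardless of
// the destination width, because extending into the full 64-bit register
// already covers every narrower result for free.
func signExtSketch(x int16) int64 {
	return int64(x) // one MOVWQSX, the same instruction SignExt16to32 uses
}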
46602 func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool { 46603 // match: (SignExt16to64 x) 46604 // cond: 46605 // result: (MOVWQSX x) 46606 for { 46607 x := v.Args[0] 46608 v.reset(OpAMD64MOVWQSX) 46609 v.AddArg(x) 46610 return true 46611 } 46612 } 46613 func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool { 46614 // match: (SignExt32to64 x) 46615 // cond: 46616 // result: (MOVLQSX x) 46617 for { 46618 x := v.Args[0] 46619 v.reset(OpAMD64MOVLQSX) 46620 v.AddArg(x) 46621 return true 46622 } 46623 } 46624 func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool { 46625 // match: (SignExt8to16 x) 46626 // cond: 46627 // result: (MOVBQSX x) 46628 for { 46629 x := v.Args[0] 46630 v.reset(OpAMD64MOVBQSX) 46631 v.AddArg(x) 46632 return true 46633 } 46634 } 46635 func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool { 46636 // match: (SignExt8to32 x) 46637 // cond: 46638 // result: (MOVBQSX x) 46639 for { 46640 x := v.Args[0] 46641 v.reset(OpAMD64MOVBQSX) 46642 v.AddArg(x) 46643 return true 46644 } 46645 } 46646 func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool { 46647 // match: (SignExt8to64 x) 46648 // cond: 46649 // result: (MOVBQSX x) 46650 for { 46651 x := v.Args[0] 46652 v.reset(OpAMD64MOVBQSX) 46653 v.AddArg(x) 46654 return true 46655 } 46656 } 46657 func rewriteValueAMD64_OpSlicemask_0(v *Value) bool { 46658 b := v.Block 46659 _ = b 46660 // match: (Slicemask <t> x) 46661 // cond: 46662 // result: (SARQconst (NEGQ <t> x) [63]) 46663 for { 46664 t := v.Type 46665 x := v.Args[0] 46666 v.reset(OpAMD64SARQconst) 46667 v.AuxInt = 63 46668 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 46669 v0.AddArg(x) 46670 v.AddArg(v0) 46671 return true 46672 } 46673 } 46674 func rewriteValueAMD64_OpSqrt_0(v *Value) bool { 46675 // match: (Sqrt x) 46676 // cond: 46677 // result: (SQRTSD x) 46678 for { 46679 x := v.Args[0] 46680 v.reset(OpAMD64SQRTSD) 46681 v.AddArg(x) 46682 return true 46683 } 46684 } 46685 func rewriteValueAMD64_OpStaticCall_0(v *Value) bool { 46686 // match: (StaticCall [argwid] {target} mem) 46687 // cond: 46688 // result: (CALLstatic [argwid] {target} mem) 46689 for { 46690 argwid := v.AuxInt 46691 target := v.Aux 46692 mem := v.Args[0] 46693 v.reset(OpAMD64CALLstatic) 46694 v.AuxInt = argwid 46695 v.Aux = target 46696 v.AddArg(mem) 46697 return true 46698 } 46699 } 46700 func rewriteValueAMD64_OpStore_0(v *Value) bool { 46701 // match: (Store {t} ptr val mem) 46702 // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) 46703 // result: (MOVSDstore ptr val mem) 46704 for { 46705 t := v.Aux 46706 _ = v.Args[2] 46707 ptr := v.Args[0] 46708 val := v.Args[1] 46709 mem := v.Args[2] 46710 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { 46711 break 46712 } 46713 v.reset(OpAMD64MOVSDstore) 46714 v.AddArg(ptr) 46715 v.AddArg(val) 46716 v.AddArg(mem) 46717 return true 46718 } 46719 // match: (Store {t} ptr val mem) 46720 // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) 46721 // result: (MOVSSstore ptr val mem) 46722 for { 46723 t := v.Aux 46724 _ = v.Args[2] 46725 ptr := v.Args[0] 46726 val := v.Args[1] 46727 mem := v.Args[2] 46728 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { 46729 break 46730 } 46731 v.reset(OpAMD64MOVSSstore) 46732 v.AddArg(ptr) 46733 v.AddArg(val) 46734 v.AddArg(mem) 46735 return true 46736 } 46737 // match: (Store {t} ptr val mem) 46738 // cond: t.(*types.Type).Size() == 8 46739 // result: (MOVQstore ptr val mem) 46740 for { 46741 t := v.Aux 46742 _ = v.Args[2] 46743 ptr := v.Args[0] 46744 val := v.Args[1] 46745 mem 
:= v.Args[2] 46746 if !(t.(*types.Type).Size() == 8) { 46747 break 46748 } 46749 v.reset(OpAMD64MOVQstore) 46750 v.AddArg(ptr) 46751 v.AddArg(val) 46752 v.AddArg(mem) 46753 return true 46754 } 46755 // match: (Store {t} ptr val mem) 46756 // cond: t.(*types.Type).Size() == 4 46757 // result: (MOVLstore ptr val mem) 46758 for { 46759 t := v.Aux 46760 _ = v.Args[2] 46761 ptr := v.Args[0] 46762 val := v.Args[1] 46763 mem := v.Args[2] 46764 if !(t.(*types.Type).Size() == 4) { 46765 break 46766 } 46767 v.reset(OpAMD64MOVLstore) 46768 v.AddArg(ptr) 46769 v.AddArg(val) 46770 v.AddArg(mem) 46771 return true 46772 } 46773 // match: (Store {t} ptr val mem) 46774 // cond: t.(*types.Type).Size() == 2 46775 // result: (MOVWstore ptr val mem) 46776 for { 46777 t := v.Aux 46778 _ = v.Args[2] 46779 ptr := v.Args[0] 46780 val := v.Args[1] 46781 mem := v.Args[2] 46782 if !(t.(*types.Type).Size() == 2) { 46783 break 46784 } 46785 v.reset(OpAMD64MOVWstore) 46786 v.AddArg(ptr) 46787 v.AddArg(val) 46788 v.AddArg(mem) 46789 return true 46790 } 46791 // match: (Store {t} ptr val mem) 46792 // cond: t.(*types.Type).Size() == 1 46793 // result: (MOVBstore ptr val mem) 46794 for { 46795 t := v.Aux 46796 _ = v.Args[2] 46797 ptr := v.Args[0] 46798 val := v.Args[1] 46799 mem := v.Args[2] 46800 if !(t.(*types.Type).Size() == 1) { 46801 break 46802 } 46803 v.reset(OpAMD64MOVBstore) 46804 v.AddArg(ptr) 46805 v.AddArg(val) 46806 v.AddArg(mem) 46807 return true 46808 } 46809 return false 46810 } 46811 func rewriteValueAMD64_OpSub16_0(v *Value) bool { 46812 // match: (Sub16 x y) 46813 // cond: 46814 // result: (SUBL x y) 46815 for { 46816 _ = v.Args[1] 46817 x := v.Args[0] 46818 y := v.Args[1] 46819 v.reset(OpAMD64SUBL) 46820 v.AddArg(x) 46821 v.AddArg(y) 46822 return true 46823 } 46824 } 46825 func rewriteValueAMD64_OpSub32_0(v *Value) bool { 46826 // match: (Sub32 x y) 46827 // cond: 46828 // result: (SUBL x y) 46829 for { 46830 _ = v.Args[1] 46831 x := v.Args[0] 46832 y := v.Args[1] 46833 v.reset(OpAMD64SUBL) 46834 v.AddArg(x) 46835 v.AddArg(y) 46836 return true 46837 } 46838 } 46839 func rewriteValueAMD64_OpSub32F_0(v *Value) bool { 46840 // match: (Sub32F x y) 46841 // cond: 46842 // result: (SUBSS x y) 46843 for { 46844 _ = v.Args[1] 46845 x := v.Args[0] 46846 y := v.Args[1] 46847 v.reset(OpAMD64SUBSS) 46848 v.AddArg(x) 46849 v.AddArg(y) 46850 return true 46851 } 46852 } 46853 func rewriteValueAMD64_OpSub64_0(v *Value) bool { 46854 // match: (Sub64 x y) 46855 // cond: 46856 // result: (SUBQ x y) 46857 for { 46858 _ = v.Args[1] 46859 x := v.Args[0] 46860 y := v.Args[1] 46861 v.reset(OpAMD64SUBQ) 46862 v.AddArg(x) 46863 v.AddArg(y) 46864 return true 46865 } 46866 } 46867 func rewriteValueAMD64_OpSub64F_0(v *Value) bool { 46868 // match: (Sub64F x y) 46869 // cond: 46870 // result: (SUBSD x y) 46871 for { 46872 _ = v.Args[1] 46873 x := v.Args[0] 46874 y := v.Args[1] 46875 v.reset(OpAMD64SUBSD) 46876 v.AddArg(x) 46877 v.AddArg(y) 46878 return true 46879 } 46880 } 46881 func rewriteValueAMD64_OpSub8_0(v *Value) bool { 46882 // match: (Sub8 x y) 46883 // cond: 46884 // result: (SUBL x y) 46885 for { 46886 _ = v.Args[1] 46887 x := v.Args[0] 46888 y := v.Args[1] 46889 v.reset(OpAMD64SUBL) 46890 v.AddArg(x) 46891 v.AddArg(y) 46892 return true 46893 } 46894 } 46895 func rewriteValueAMD64_OpSubPtr_0(v *Value) bool { 46896 b := v.Block 46897 _ = b 46898 config := b.Func.Config 46899 _ = config 46900 // match: (SubPtr x y) 46901 // cond: config.PtrSize == 8 46902 // result: (SUBQ x y) 46903 for { 46904 _ = v.Args[1] 46905 x := 
v.Args[0] 46906 y := v.Args[1] 46907 if !(config.PtrSize == 8) { 46908 break 46909 } 46910 v.reset(OpAMD64SUBQ) 46911 v.AddArg(x) 46912 v.AddArg(y) 46913 return true 46914 } 46915 // match: (SubPtr x y) 46916 // cond: config.PtrSize == 4 46917 // result: (SUBL x y) 46918 for { 46919 _ = v.Args[1] 46920 x := v.Args[0] 46921 y := v.Args[1] 46922 if !(config.PtrSize == 4) { 46923 break 46924 } 46925 v.reset(OpAMD64SUBL) 46926 v.AddArg(x) 46927 v.AddArg(y) 46928 return true 46929 } 46930 return false 46931 } 46932 func rewriteValueAMD64_OpTrunc_0(v *Value) bool { 46933 // match: (Trunc x) 46934 // cond: 46935 // result: (ROUNDSD [3] x) 46936 for { 46937 x := v.Args[0] 46938 v.reset(OpAMD64ROUNDSD) 46939 v.AuxInt = 3 46940 v.AddArg(x) 46941 return true 46942 } 46943 } 46944 func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool { 46945 // match: (Trunc16to8 x) 46946 // cond: 46947 // result: x 46948 for { 46949 x := v.Args[0] 46950 v.reset(OpCopy) 46951 v.Type = x.Type 46952 v.AddArg(x) 46953 return true 46954 } 46955 } 46956 func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool { 46957 // match: (Trunc32to16 x) 46958 // cond: 46959 // result: x 46960 for { 46961 x := v.Args[0] 46962 v.reset(OpCopy) 46963 v.Type = x.Type 46964 v.AddArg(x) 46965 return true 46966 } 46967 } 46968 func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool { 46969 // match: (Trunc32to8 x) 46970 // cond: 46971 // result: x 46972 for { 46973 x := v.Args[0] 46974 v.reset(OpCopy) 46975 v.Type = x.Type 46976 v.AddArg(x) 46977 return true 46978 } 46979 } 46980 func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool { 46981 // match: (Trunc64to16 x) 46982 // cond: 46983 // result: x 46984 for { 46985 x := v.Args[0] 46986 v.reset(OpCopy) 46987 v.Type = x.Type 46988 v.AddArg(x) 46989 return true 46990 } 46991 } 46992 func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool { 46993 // match: (Trunc64to32 x) 46994 // cond: 46995 // result: x 46996 for { 46997 x := v.Args[0] 46998 v.reset(OpCopy) 46999 v.Type = x.Type 47000 v.AddArg(x) 47001 return true 47002 } 47003 } 47004 func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool { 47005 // match: (Trunc64to8 x) 47006 // cond: 47007 // result: x 47008 for { 47009 x := v.Args[0] 47010 v.reset(OpCopy) 47011 v.Type = x.Type 47012 v.AddArg(x) 47013 return true 47014 } 47015 } 47016 func rewriteValueAMD64_OpWB_0(v *Value) bool { 47017 // match: (WB {fn} destptr srcptr mem) 47018 // cond: 47019 // result: (LoweredWB {fn} destptr srcptr mem) 47020 for { 47021 fn := v.Aux 47022 _ = v.Args[2] 47023 destptr := v.Args[0] 47024 srcptr := v.Args[1] 47025 mem := v.Args[2] 47026 v.reset(OpAMD64LoweredWB) 47027 v.Aux = fn 47028 v.AddArg(destptr) 47029 v.AddArg(srcptr) 47030 v.AddArg(mem) 47031 return true 47032 } 47033 } 47034 func rewriteValueAMD64_OpXor16_0(v *Value) bool { 47035 // match: (Xor16 x y) 47036 // cond: 47037 // result: (XORL x y) 47038 for { 47039 _ = v.Args[1] 47040 x := v.Args[0] 47041 y := v.Args[1] 47042 v.reset(OpAMD64XORL) 47043 v.AddArg(x) 47044 v.AddArg(y) 47045 return true 47046 } 47047 } 47048 func rewriteValueAMD64_OpXor32_0(v *Value) bool { 47049 // match: (Xor32 x y) 47050 // cond: 47051 // result: (XORL x y) 47052 for { 47053 _ = v.Args[1] 47054 x := v.Args[0] 47055 y := v.Args[1] 47056 v.reset(OpAMD64XORL) 47057 v.AddArg(x) 47058 v.AddArg(y) 47059 return true 47060 } 47061 } 47062 func rewriteValueAMD64_OpXor64_0(v *Value) bool { 47063 // match: (Xor64 x y) 47064 // cond: 47065 // result: (XORQ x y) 47066 for { 47067 _ = v.Args[1] 47068 x := v.Args[0] 47069 y := v.Args[1] 47070 
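// Editorial sketch: the (Store {t} ptr val mem) rules above pick a store
// width from the type's size, trying the float cases first so an 8-byte
// float64 becomes MOVSDstore rather than MOVQstore. A hypothetical helper
// (storeOp, is64F, is32F are illustrative names, not compiler API)
// summarizing that ordered dispatch:
//
//	func storeOp(size int64, is64F, is32F bool) Op {
//		switch {
//		case size == 8 && is64F:
//			return OpAMD64MOVSDstore
//		case size == 4 && is32F:
//			return OpAMD64MOVSSstore
//		case size == 8:
//			return OpAMD64MOVQstore
//		case size == 4:
//			return OpAMD64MOVLstore
//		case size == 2:
//			return OpAMD64MOVWstore
//		default: // size == 1
//			return OpAMD64MOVBstore
//		}
//	}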
v.reset(OpAMD64XORQ) 47071 v.AddArg(x) 47072 v.AddArg(y) 47073 return true 47074 } 47075 } 47076 func rewriteValueAMD64_OpXor8_0(v *Value) bool { 47077 // match: (Xor8 x y) 47078 // cond: 47079 // result: (XORL x y) 47080 for { 47081 _ = v.Args[1] 47082 x := v.Args[0] 47083 y := v.Args[1] 47084 v.reset(OpAMD64XORL) 47085 v.AddArg(x) 47086 v.AddArg(y) 47087 return true 47088 } 47089 } 47090 func rewriteValueAMD64_OpZero_0(v *Value) bool { 47091 b := v.Block 47092 _ = b 47093 config := b.Func.Config 47094 _ = config 47095 // match: (Zero [0] _ mem) 47096 // cond: 47097 // result: mem 47098 for { 47099 if v.AuxInt != 0 { 47100 break 47101 } 47102 _ = v.Args[1] 47103 mem := v.Args[1] 47104 v.reset(OpCopy) 47105 v.Type = mem.Type 47106 v.AddArg(mem) 47107 return true 47108 } 47109 // match: (Zero [1] destptr mem) 47110 // cond: 47111 // result: (MOVBstoreconst [0] destptr mem) 47112 for { 47113 if v.AuxInt != 1 { 47114 break 47115 } 47116 _ = v.Args[1] 47117 destptr := v.Args[0] 47118 mem := v.Args[1] 47119 v.reset(OpAMD64MOVBstoreconst) 47120 v.AuxInt = 0 47121 v.AddArg(destptr) 47122 v.AddArg(mem) 47123 return true 47124 } 47125 // match: (Zero [2] destptr mem) 47126 // cond: 47127 // result: (MOVWstoreconst [0] destptr mem) 47128 for { 47129 if v.AuxInt != 2 { 47130 break 47131 } 47132 _ = v.Args[1] 47133 destptr := v.Args[0] 47134 mem := v.Args[1] 47135 v.reset(OpAMD64MOVWstoreconst) 47136 v.AuxInt = 0 47137 v.AddArg(destptr) 47138 v.AddArg(mem) 47139 return true 47140 } 47141 // match: (Zero [4] destptr mem) 47142 // cond: 47143 // result: (MOVLstoreconst [0] destptr mem) 47144 for { 47145 if v.AuxInt != 4 { 47146 break 47147 } 47148 _ = v.Args[1] 47149 destptr := v.Args[0] 47150 mem := v.Args[1] 47151 v.reset(OpAMD64MOVLstoreconst) 47152 v.AuxInt = 0 47153 v.AddArg(destptr) 47154 v.AddArg(mem) 47155 return true 47156 } 47157 // match: (Zero [8] destptr mem) 47158 // cond: 47159 // result: (MOVQstoreconst [0] destptr mem) 47160 for { 47161 if v.AuxInt != 8 { 47162 break 47163 } 47164 _ = v.Args[1] 47165 destptr := v.Args[0] 47166 mem := v.Args[1] 47167 v.reset(OpAMD64MOVQstoreconst) 47168 v.AuxInt = 0 47169 v.AddArg(destptr) 47170 v.AddArg(mem) 47171 return true 47172 } 47173 // match: (Zero [3] destptr mem) 47174 // cond: 47175 // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) 47176 for { 47177 if v.AuxInt != 3 { 47178 break 47179 } 47180 _ = v.Args[1] 47181 destptr := v.Args[0] 47182 mem := v.Args[1] 47183 v.reset(OpAMD64MOVBstoreconst) 47184 v.AuxInt = makeValAndOff(0, 2) 47185 v.AddArg(destptr) 47186 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem) 47187 v0.AuxInt = 0 47188 v0.AddArg(destptr) 47189 v0.AddArg(mem) 47190 v.AddArg(v0) 47191 return true 47192 } 47193 // match: (Zero [5] destptr mem) 47194 // cond: 47195 // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 47196 for { 47197 if v.AuxInt != 5 { 47198 break 47199 } 47200 _ = v.Args[1] 47201 destptr := v.Args[0] 47202 mem := v.Args[1] 47203 v.reset(OpAMD64MOVBstoreconst) 47204 v.AuxInt = makeValAndOff(0, 4) 47205 v.AddArg(destptr) 47206 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 47207 v0.AuxInt = 0 47208 v0.AddArg(destptr) 47209 v0.AddArg(mem) 47210 v.AddArg(v0) 47211 return true 47212 } 47213 // match: (Zero [6] destptr mem) 47214 // cond: 47215 // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) 47216 for { 47217 if v.AuxInt != 6 { 47218 break 47219 } 47220 _ = v.Args[1] 47221 
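// Editorial sketch: the small (Zero [3]/[5]/[6]/[7]) rules around here cover
// odd sizes with two overlapping constant stores instead of a byte loop;
// makeValAndOff(0, off) packs the zero value and the store offset into a
// single AuxInt. Zero [7] written out by hand (illustration, not the rule;
// uses encoding/binary):
//
//	func zero7(p []byte) {
//		_ = p[6] // the rule fires only when all 7 bytes are in bounds
//		binary.LittleEndian.PutUint32(p[0:4], 0) // MOVLstoreconst [0]
//		binary.LittleEndian.PutUint32(p[3:7], 0) // MOVLstoreconst [makeValAndOff(0,3)]
//	}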
destptr := v.Args[0] 47222 mem := v.Args[1] 47223 v.reset(OpAMD64MOVWstoreconst) 47224 v.AuxInt = makeValAndOff(0, 4) 47225 v.AddArg(destptr) 47226 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 47227 v0.AuxInt = 0 47228 v0.AddArg(destptr) 47229 v0.AddArg(mem) 47230 v.AddArg(v0) 47231 return true 47232 } 47233 // match: (Zero [7] destptr mem) 47234 // cond: 47235 // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) 47236 for { 47237 if v.AuxInt != 7 { 47238 break 47239 } 47240 _ = v.Args[1] 47241 destptr := v.Args[0] 47242 mem := v.Args[1] 47243 v.reset(OpAMD64MOVLstoreconst) 47244 v.AuxInt = makeValAndOff(0, 3) 47245 v.AddArg(destptr) 47246 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem) 47247 v0.AuxInt = 0 47248 v0.AddArg(destptr) 47249 v0.AddArg(mem) 47250 v.AddArg(v0) 47251 return true 47252 } 47253 // match: (Zero [s] destptr mem) 47254 // cond: s%8 != 0 && s > 8 && !config.useSSE 47255 // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem)) 47256 for { 47257 s := v.AuxInt 47258 _ = v.Args[1] 47259 destptr := v.Args[0] 47260 mem := v.Args[1] 47261 if !(s%8 != 0 && s > 8 && !config.useSSE) { 47262 break 47263 } 47264 v.reset(OpZero) 47265 v.AuxInt = s - s%8 47266 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 47267 v0.AuxInt = s % 8 47268 v0.AddArg(destptr) 47269 v.AddArg(v0) 47270 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 47271 v1.AuxInt = 0 47272 v1.AddArg(destptr) 47273 v1.AddArg(mem) 47274 v.AddArg(v1) 47275 return true 47276 } 47277 return false 47278 } 47279 func rewriteValueAMD64_OpZero_10(v *Value) bool { 47280 b := v.Block 47281 _ = b 47282 config := b.Func.Config 47283 _ = config 47284 // match: (Zero [16] destptr mem) 47285 // cond: !config.useSSE 47286 // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) 47287 for { 47288 if v.AuxInt != 16 { 47289 break 47290 } 47291 _ = v.Args[1] 47292 destptr := v.Args[0] 47293 mem := v.Args[1] 47294 if !(!config.useSSE) { 47295 break 47296 } 47297 v.reset(OpAMD64MOVQstoreconst) 47298 v.AuxInt = makeValAndOff(0, 8) 47299 v.AddArg(destptr) 47300 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 47301 v0.AuxInt = 0 47302 v0.AddArg(destptr) 47303 v0.AddArg(mem) 47304 v.AddArg(v0) 47305 return true 47306 } 47307 // match: (Zero [24] destptr mem) 47308 // cond: !config.useSSE 47309 // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) 47310 for { 47311 if v.AuxInt != 24 { 47312 break 47313 } 47314 _ = v.Args[1] 47315 destptr := v.Args[0] 47316 mem := v.Args[1] 47317 if !(!config.useSSE) { 47318 break 47319 } 47320 v.reset(OpAMD64MOVQstoreconst) 47321 v.AuxInt = makeValAndOff(0, 16) 47322 v.AddArg(destptr) 47323 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 47324 v0.AuxInt = makeValAndOff(0, 8) 47325 v0.AddArg(destptr) 47326 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) 47327 v1.AuxInt = 0 47328 v1.AddArg(destptr) 47329 v1.AddArg(mem) 47330 v0.AddArg(v1) 47331 v.AddArg(v0) 47332 return true 47333 } 47334 // match: (Zero [32] destptr mem) 47335 // cond: !config.useSSE 47336 // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) 47337 for { 47338 if v.AuxInt != 32 { 47339 break 47340 } 47341 _ = v.Args[1] 47342 destptr := 
func rewriteValueAMD64_OpZero_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, s-8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore destptr (MOVOconst [0]) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(destptr)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v3.AuxInt = 0
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
	for {
		if v.AuxInt != 48 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 32
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 16
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v5.AddArg(destptr)
		v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v6.AuxInt = 0
		v5.AddArg(v6)
		v5.AddArg(mem)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
	for {
		if v.AuxInt != 64 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 48
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 32
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v6.AuxInt = 16
		v6.AddArg(destptr)
		v5.AddArg(v6)
		v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v7.AuxInt = 0
		v5.AddArg(v7)
		v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v8.AddArg(destptr)
		v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v9.AuxInt = 0
		v8.AddArg(v9)
		v8.AddArg(mem)
		v5.AddArg(v8)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	return false
}
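// In the config.useSSE cases above, a Zero of 16, 32, 48, or 64 bytes is a
// chain of 16-byte MOVOstore ops, highest offset first, each threaded through
// the next store's memory result so the stores stay ordered in the SSA graph.
// offsetsForMOVOChainSketch is a hypothetical illustration of the offsets
// that chain uses (not generated code).
func offsetsForMOVOChainSketch(s int64) []int64 {
	// For s = 48 this yields [32 16 0]: three MOVOstore ops, where the
	// store at offset 0 takes the incoming memory and the store at
	// offset 32 produces the final memory value.
	var offs []int64
	for off := s - 16; off >= 0; off -= 16 {
		offs = append(offs, off)
	}
	return offs
}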
func rewriteValueAMD64_OpZero_20(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = s
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = 0
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
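// Taken together, the three OpZero functions form a size ladder: unrolled
// stores for small sizes, DUFFZERO for medium 16-byte-multiple sizes up to
// 1K, and REP STOSQ beyond that (or when Duff's device is disabled).
// zeroStrategySketch is a hypothetical helper giving a rough summary of that
// choice, for illustration only; the rule conditions above are authoritative,
// in particular around the exact boundary and alignment cases.
func zeroStrategySketch(s int64, useSSE, noDuffDevice bool) string {
	switch {
	case !useSSE && s > 32 && s%8 == 0:
		return "REPSTOSQ" // without SSE, give up on unrolling past 32 bytes
	case s <= 64:
		return "unrolled stores" // the MOVB/MOVW/MOVL/MOVQ/MOVO sequences above
	case s <= 1024 && s%16 == 0 && !noDuffDevice:
		return "DUFFZERO" // jump into a pre-generated run of 16-byte stores
	default:
		return "REPSTOSQ" // microcoded rep stosq, count = s/8
	}
}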
func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool {
	// match: (ZeroExt16to32 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool {
	// match: (ZeroExt16to64 x)
	// cond:
	// result: (MOVWQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool {
	// match: (ZeroExt32to64 x)
	// cond:
	// result: (MOVLQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool {
	// match: (ZeroExt8to16 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool {
	// match: (ZeroExt8to32 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool {
	// match: (ZeroExt8to64 x)
	// cond:
	// result: (MOVBQZX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
}
func rewriteBlockAMD64(b *Block) bool {
	config := b.Func.Config
	_ = config
	fe := b.Func.fe
	_ = fe
	typ := &config.Types
	_ = typ
	switch b.Kind {
	case BlockAMD64EQ:
		// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (UGE (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (UGE (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (EQ (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (UGE (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64UGE
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
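		// The bit-test rewrites above all follow one pattern: a TEST
		// against a single-bit mask (1<<x or a power-of-two constant c)
		// is replaced by a BT instruction, which copies the selected bit
		// into the carry flag. "Bit clear" (EQ after TEST) then becomes
		// the CF==0 condition, i.e. an unsigned >= branch (JAE), which
		// is why the block kind flips to UGE here. For example, testing
		// bit 3 of x via (EQ (TESTQconst [8] x)) becomes
		// (UGE (BTQconst [3] x)). The NE rules later in this switch are
		// the mirror image, branching on CF==1 (ULT/JB).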
		// match: (EQ (InvertFlags cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (EQ (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (EQ (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64GE:
		// match: (GE (InvertFlags cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockIf:
		// match: (If (SETL cmp) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETL {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETLE cmp) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETLE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETG cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETG {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGE cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQ cmp) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQ {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNE cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETB cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETB {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETBE cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETBE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETA cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETA {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETAE cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETAE {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGF cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGEF cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETGEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQF cmp) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETEQF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNEF cmp) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64SETNEF {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If cond yes no)
		// cond:
		// result: (NE (TESTB cond cond) yes no)
		for {
			v := b.Control
			_ = v
			cond := b.Control
			b.Kind = BlockAMD64NE
			v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
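		// The catch-all rule above handles an If whose condition is a
		// boolean value rather than a recognized SETcc: TESTB cond cond
		// ANDs the byte with itself, so ZF is set exactly when the bool
		// is false, and the resulting NE block branches on cond != 0.
		// The SETcc rules before it exist so that a condition produced
		// directly by a comparison skips the materialized boolean and
		// branches on the comparison's flags instead.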
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
		// cond:
		// result: (LT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
		// cond:
		// result: (LE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
		// cond:
		// result: (GT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
		// cond:
		// result: (GE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
		// cond:
		// result: (EQ cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTL {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			_ = v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			x := v_0.Args[1]
			y := v.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x)))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			x := v_1.Args[1]
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTLconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
		// result: (ULT (BTLconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTLconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQconst {
				break
			}
			c := v.AuxInt
			x := v.Args[0]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			x := v.Args[1]
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ x (MOVQconst [c])))
		// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for {
			v := b.Control
			if v.Op != OpAMD64TESTQ {
				break
			}
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
		// cond:
		// result: (EQF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
		// cond:
		// result: (NEF cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64TESTB {
				break
			}
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (InvertFlags cmp) yes no)
		// cond:
		// result: (NE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp) yes no)
		// cond:
		// result: (ULE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp) yes no)
		// cond:
		// result: (ULT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp) yes no)
		// cond:
		// result: (UGE cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagEQ) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
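		// The ULT case below, like the flag rules in the cases above,
		// folds a branch whose comparison result is statically known.
		// FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, and FlagGT_UGT are
		// constant flag values encoding both the signed and the unsigned
		// ordering of the original operands; each conditional block over
		// such a constant becomes BlockFirst (always take the first
		// successor), with swapSuccessors() when the branch would have
		// gone the other way. For example, ULT over FlagLT_UGT is signed
		// less but unsigned greater, so the unsigned-less test fails and
		// the successors are swapped.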
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp) yes no)
		// cond:
		// result: (UGT cmp yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64InvertFlags {
				break
			}
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagEQ) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagEQ {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagLT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagLT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT) yes no)
		// cond:
		// result: (First nil yes no)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_ULT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagGT_UGT) yes no)
		// cond:
		// result: (First nil no yes)
		for {
			v := b.Control
			if v.Op != OpAMD64FlagGT_UGT {
				break
			}
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	}
	return false
}
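// rewriteValueAMD64 and rewriteBlockAMD64 are one-step rewriters: each call
// applies at most one rule and reports whether anything changed. The generic
// rewrite pass (in rewrite.go, not shown here) drives them to a fixed point,
// roughly like the sketch below. applyRewritesSketch is hypothetical and
// illustrative only; it is not the compiler's actual driver.
func applyRewritesSketch(blocks []*Block) {
	for changed := true; changed; {
		changed = false
		for _, b := range blocks {
			// Try to simplify the block's control flow first.
			if rewriteBlockAMD64(b) {
				changed = true
			}
			// Then rewrite each value, repeating the whole sweep
			// until no rule fires anywhere.
			for _, v := range b.Values {
				if rewriteValueAMD64(v) {
					changed = true
				}
			}
		}
	}
}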