github.com/yukk001/go1.10.8@v0.0.0-20190813125351-6df2d3982e20/src/cmd/compile/internal/ssa/rewriteAMD64.go

// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used
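// The blank assignments above keep the compiler happy: the generator emits
// the same import set for every architecture's rewrite file, so each package
// is force-used once to avoid "imported and not used" errors in files that
// happen not to need them all.
//
// rewriteValueAMD64 below is the dispatch entry point for the AMD64 rewrite
// rules: it switches on v's opcode and delegates to per-opcode helpers.
// Opcodes with many rules are split into chunks of ten (_0, _10, _20, ...)
// chained with ||, so evaluation short-circuits at the first rule that fires.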
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v)
	case OpAMD64ADDLconstmem:
		return rewriteValueAMD64_OpAMD64ADDLconstmem_0(v)
	case OpAMD64ADDLmem:
		return rewriteValueAMD64_OpAMD64ADDLmem_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v)
	case OpAMD64ADDQconstmem:
		return rewriteValueAMD64_OpAMD64ADDQconstmem_0(v)
	case OpAMD64ADDQmem:
		return rewriteValueAMD64_OpAMD64ADDQmem_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDmem:
		return rewriteValueAMD64_OpAMD64ADDSDmem_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSmem:
		return rewriteValueAMD64_OpAMD64ADDSSmem_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLmem:
		return rewriteValueAMD64_OpAMD64ANDLmem_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQmem:
		return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDmem:
		return rewriteValueAMD64_OpAMD64MULSDmem_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSmem:
		return rewriteValueAMD64_OpAMD64MULSSmem_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLmem:
		return rewriteValueAMD64_OpAMD64ORLmem_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQmem:
		return rewriteValueAMD64_OpAMD64ORQmem_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETAEmem:
		return rewriteValueAMD64_OpAMD64SETAEmem_0(v)
	case OpAMD64SETAmem:
		return rewriteValueAMD64_OpAMD64SETAmem_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETBEmem:
		return rewriteValueAMD64_OpAMD64SETBEmem_0(v)
	case OpAMD64SETBmem:
		return rewriteValueAMD64_OpAMD64SETBmem_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
	case OpAMD64SETEQmem:
		return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETGEmem:
		return rewriteValueAMD64_OpAMD64SETGEmem_0(v)
	case OpAMD64SETGmem:
		return rewriteValueAMD64_OpAMD64SETGmem_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETLEmem:
		return rewriteValueAMD64_OpAMD64SETLEmem_0(v)
	case OpAMD64SETLmem:
		return rewriteValueAMD64_OpAMD64SETLmem_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
	case OpAMD64SETNEmem:
		return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLmem:
		return rewriteValueAMD64_OpAMD64SUBLmem_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQmem:
		return rewriteValueAMD64_OpAMD64SUBQmem_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDmem:
		return rewriteValueAMD64_OpAMD64SUBSDmem_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSmem:
		return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLmem:
		return rewriteValueAMD64_OpAMD64XORLmem_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQmem:
		return rewriteValueAMD64_OpAMD64XORQmem_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpCeil:
		return rewriteValueAMD64_OpCeil_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpConvert:
		return rewriteValueAMD64_OpConvert_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetCallerPC:
		return rewriteValueAMD64_OpGetCallerPC_0(v)
	case OpGetCallerSP:
		return rewriteValueAMD64_OpGetCallerSP_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpWB:
		return rewriteValueAMD64_OpWB_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
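// Each helper below implements up to ten rules for one opcode. Every rule
// carries its gen/AMD64.rules source as three comment lines:
//
//	// match:  the SSA value shape to look for
//	// cond:   an optional extra predicate on the captured variables
//	// result: the replacement value
//
// The generated body is a one-shot loop of the form
//
//	for {
//		// match checks; any failure executes `break`
//		// and falls through to the next rule
//		v.reset(NewOp)
//		// copy AuxInt/Aux and re-add arguments
//		return true
//	}
//
// so a helper returns true exactly when one of its rules rewrote v in place.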
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
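	// The eight rules above recognize rotates written as shift pairs. For
	// example, with c = 10 and d = 22 (so d == 32-c), x<<10 and x>>22 cover
	// disjoint bits of a 32-bit x, so their sum equals their OR and
	// (ADDL (SHLLconst x [10]) (SHRLconst x [22])) is exactly a rotate left
	// by 10 -- what math/bits spells bits.RotateLeft32(x, 10). The 16- and
	// 8-bit variants additionally check that the value's type really is 2
	// or 1 bytes wide (t.Size()) and that c stays in range, since SHRWconst
	// and SHRBconst shift within the narrow width.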
	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDLmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
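// The two rules above fold a load into the add: an ADDL with one operand
// coming straight from a MOVLload becomes a single memory-operand ADDLmem.
// Roughly, canMergeLoad checks that the load is used only by this add and
// can safely be performed as part of it, and clobber(l) marks the now-dead
// load so dead-code elimination removes it.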
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
	// match: (ADDLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// cond:
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSSstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
	// cond: d==64-c
	// result: (ROLQconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 64-c) {
			break
		}
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
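	// The remaining ADDQ rules prefer LEA forms: x + y<<3 becomes LEAQ8 x y,
	// x + y<<2 becomes LEAQ4, and x + y<<1 becomes LEAQ2 (with mirrored
	// variants for either operand order). LEA computes base + scale*index
	// + disp in one instruction without touching the flags, and the LEAQ
	// forms also let later rules fold offsets and symbols into addressing
	// modes.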
	// match: (ADDQ x (SHLQconst [3] y))
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [3] y) x)
	// cond:
	// result: (LEAQ8 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [2] y) x)
	// cond:
	// result: (LEAQ4 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (SHLQconst [1] y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
	// match: (ADDQ x (ADDQ y y))
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (ADDQ y y) x)
	// cond:
	// result: (LEAQ2 x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if y != v_0.Args[1] {
			break
		}
		x := v.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (ADDQ x y))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		y := v_1.Args[1]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (ADDQ y x))
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ x y) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ (ADDQ y x) x)
	// cond:
	// result: (LEAQ2 y x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		x := v_0.Args[1]
		if x != v.Args[1] {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
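	// The six rules above canonicalize sums containing a doubled operand:
	// (ADDQ x (ADDQ y y)) is x + 2*y, so it becomes (LEAQ2 x y), while
	// (ADDQ x (ADDQ x y)) is 2*x + y and becomes (LEAQ2 y x). Either way
	// the result is a single flag-free base + 2*index LEA.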
	// match: (ADDQ (ADDQconst [c] x) y)
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ y (ADDQconst [c] x))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (LEAQ [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		x := v.Args[1]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
	// match: (ADDQ x (NEGQ y))
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ (NEGQ y) x)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		y := v_0.Args[0]
		x := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (ADDQmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
	// match: (ADDQconst [c] (ADDQ x y))
	// cond:
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
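	// The LEAQ folds above all carry the is32Bit(c+d) guard: displacements
	// in x86-64 addressing modes are encoded as sign-extended 32-bit
	// immediates, so two offsets may only be combined into one AuxInt when
	// their sum still fits in int32.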
	// match: (ADDQconst [0] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _))
	// cond:
	// result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x))
	for {
		valOff := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != ValAndOff(valOff).Off() {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		x := v_1.Args[1]
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = ValAndOff(valOff).Val()
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// cond:
	// result: (ADDQ x (MOVQf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSDstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
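// ADDQconstmem/ADDQmem above also handle one special case: if the memory
// operand was just written by a MOVSDstore to the same [off]{sym} address,
// the load would only read back a float register's bits. Rather than a
// store/load round trip through memory, the rewrite grabs the stored value
// directly with MOVQf2i (an SSE-to-GP register-to-register move) and falls
// back to a plain register ADDQ/ADDQconst. The 32-bit MOVSSstore/MOVLf2i
// rules earlier in the file do the same for floats.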
2175 x := v.Args[1] 2176 if !(canMergeLoad(v, l, x) && clobber(l)) { 2177 break 2178 } 2179 v.reset(OpAMD64ADDSDmem) 2180 v.AuxInt = off 2181 v.Aux = sym 2182 v.AddArg(x) 2183 v.AddArg(ptr) 2184 v.AddArg(mem) 2185 return true 2186 } 2187 return false 2188 } 2189 func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool { 2190 b := v.Block 2191 _ = b 2192 typ := &b.Func.Config.Types 2193 _ = typ 2194 // match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 2195 // cond: 2196 // result: (ADDSD x (MOVQi2f y)) 2197 for { 2198 off := v.AuxInt 2199 sym := v.Aux 2200 _ = v.Args[2] 2201 x := v.Args[0] 2202 ptr := v.Args[1] 2203 v_2 := v.Args[2] 2204 if v_2.Op != OpAMD64MOVQstore { 2205 break 2206 } 2207 if v_2.AuxInt != off { 2208 break 2209 } 2210 if v_2.Aux != sym { 2211 break 2212 } 2213 _ = v_2.Args[2] 2214 if ptr != v_2.Args[0] { 2215 break 2216 } 2217 y := v_2.Args[1] 2218 v.reset(OpAMD64ADDSD) 2219 v.AddArg(x) 2220 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 2221 v0.AddArg(y) 2222 v.AddArg(v0) 2223 return true 2224 } 2225 return false 2226 } 2227 func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { 2228 // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) 2229 // cond: canMergeLoad(v, l, x) && clobber(l) 2230 // result: (ADDSSmem x [off] {sym} ptr mem) 2231 for { 2232 _ = v.Args[1] 2233 x := v.Args[0] 2234 l := v.Args[1] 2235 if l.Op != OpAMD64MOVSSload { 2236 break 2237 } 2238 off := l.AuxInt 2239 sym := l.Aux 2240 _ = l.Args[1] 2241 ptr := l.Args[0] 2242 mem := l.Args[1] 2243 if !(canMergeLoad(v, l, x) && clobber(l)) { 2244 break 2245 } 2246 v.reset(OpAMD64ADDSSmem) 2247 v.AuxInt = off 2248 v.Aux = sym 2249 v.AddArg(x) 2250 v.AddArg(ptr) 2251 v.AddArg(mem) 2252 return true 2253 } 2254 // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) 2255 // cond: canMergeLoad(v, l, x) && clobber(l) 2256 // result: (ADDSSmem x [off] {sym} ptr mem) 2257 for { 2258 _ = v.Args[1] 2259 l := v.Args[0] 2260 if l.Op != OpAMD64MOVSSload { 2261 break 2262 } 2263 off := l.AuxInt 2264 sym := l.Aux 2265 _ = l.Args[1] 2266 ptr := l.Args[0] 2267 mem := l.Args[1] 2268 x := v.Args[1] 2269 if !(canMergeLoad(v, l, x) && clobber(l)) { 2270 break 2271 } 2272 v.reset(OpAMD64ADDSSmem) 2273 v.AuxInt = off 2274 v.Aux = sym 2275 v.AddArg(x) 2276 v.AddArg(ptr) 2277 v.AddArg(mem) 2278 return true 2279 } 2280 return false 2281 } 2282 func rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool { 2283 b := v.Block 2284 _ = b 2285 typ := &b.Func.Config.Types 2286 _ = typ 2287 // match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 2288 // cond: 2289 // result: (ADDSS x (MOVLi2f y)) 2290 for { 2291 off := v.AuxInt 2292 sym := v.Aux 2293 _ = v.Args[2] 2294 x := v.Args[0] 2295 ptr := v.Args[1] 2296 v_2 := v.Args[2] 2297 if v_2.Op != OpAMD64MOVLstore { 2298 break 2299 } 2300 if v_2.AuxInt != off { 2301 break 2302 } 2303 if v_2.Aux != sym { 2304 break 2305 } 2306 _ = v_2.Args[2] 2307 if ptr != v_2.Args[0] { 2308 break 2309 } 2310 y := v_2.Args[1] 2311 v.reset(OpAMD64ADDSS) 2312 v.AddArg(x) 2313 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 2314 v0.AddArg(y) 2315 v.AddArg(v0) 2316 return true 2317 } 2318 return false 2319 } 2320 func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { 2321 // match: (ANDL x (MOVLconst [c])) 2322 // cond: 2323 // result: (ANDLconst [c] x) 2324 for { 2325 _ = v.Args[1] 2326 x := v.Args[0] 2327 v_1 := v.Args[1] 2328 if v_1.Op != OpAMD64MOVLconst { 2329 break 2330 } 2331 c := v_1.AuxInt 2332 v.reset(OpAMD64ANDLconst) 2333 v.AuxInt = c 2334 v.AddArg(x) 2335 return true 
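// With the constant folded into the immediate field, no register is needed
// to materialize it. The mirrored match just below handles the constant in
// the first operand position.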
2336 } 2337 // match: (ANDL (MOVLconst [c]) x) 2338 // cond: 2339 // result: (ANDLconst [c] x) 2340 for { 2341 _ = v.Args[1] 2342 v_0 := v.Args[0] 2343 if v_0.Op != OpAMD64MOVLconst { 2344 break 2345 } 2346 c := v_0.AuxInt 2347 x := v.Args[1] 2348 v.reset(OpAMD64ANDLconst) 2349 v.AuxInt = c 2350 v.AddArg(x) 2351 return true 2352 } 2353 // match: (ANDL x x) 2354 // cond: 2355 // result: x 2356 for { 2357 _ = v.Args[1] 2358 x := v.Args[0] 2359 if x != v.Args[1] { 2360 break 2361 } 2362 v.reset(OpCopy) 2363 v.Type = x.Type 2364 v.AddArg(x) 2365 return true 2366 } 2367 // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) 2368 // cond: canMergeLoad(v, l, x) && clobber(l) 2369 // result: (ANDLmem x [off] {sym} ptr mem) 2370 for { 2371 _ = v.Args[1] 2372 x := v.Args[0] 2373 l := v.Args[1] 2374 if l.Op != OpAMD64MOVLload { 2375 break 2376 } 2377 off := l.AuxInt 2378 sym := l.Aux 2379 _ = l.Args[1] 2380 ptr := l.Args[0] 2381 mem := l.Args[1] 2382 if !(canMergeLoad(v, l, x) && clobber(l)) { 2383 break 2384 } 2385 v.reset(OpAMD64ANDLmem) 2386 v.AuxInt = off 2387 v.Aux = sym 2388 v.AddArg(x) 2389 v.AddArg(ptr) 2390 v.AddArg(mem) 2391 return true 2392 } 2393 // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) 2394 // cond: canMergeLoad(v, l, x) && clobber(l) 2395 // result: (ANDLmem x [off] {sym} ptr mem) 2396 for { 2397 _ = v.Args[1] 2398 l := v.Args[0] 2399 if l.Op != OpAMD64MOVLload { 2400 break 2401 } 2402 off := l.AuxInt 2403 sym := l.Aux 2404 _ = l.Args[1] 2405 ptr := l.Args[0] 2406 mem := l.Args[1] 2407 x := v.Args[1] 2408 if !(canMergeLoad(v, l, x) && clobber(l)) { 2409 break 2410 } 2411 v.reset(OpAMD64ANDLmem) 2412 v.AuxInt = off 2413 v.Aux = sym 2414 v.AddArg(x) 2415 v.AddArg(ptr) 2416 v.AddArg(mem) 2417 return true 2418 } 2419 return false 2420 } 2421 func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool { 2422 // match: (ANDLconst [c] (ANDLconst [d] x)) 2423 // cond: 2424 // result: (ANDLconst [c & d] x) 2425 for { 2426 c := v.AuxInt 2427 v_0 := v.Args[0] 2428 if v_0.Op != OpAMD64ANDLconst { 2429 break 2430 } 2431 d := v_0.AuxInt 2432 x := v_0.Args[0] 2433 v.reset(OpAMD64ANDLconst) 2434 v.AuxInt = c & d 2435 v.AddArg(x) 2436 return true 2437 } 2438 // match: (ANDLconst [0xFF] x) 2439 // cond: 2440 // result: (MOVBQZX x) 2441 for { 2442 if v.AuxInt != 0xFF { 2443 break 2444 } 2445 x := v.Args[0] 2446 v.reset(OpAMD64MOVBQZX) 2447 v.AddArg(x) 2448 return true 2449 } 2450 // match: (ANDLconst [0xFFFF] x) 2451 // cond: 2452 // result: (MOVWQZX x) 2453 for { 2454 if v.AuxInt != 0xFFFF { 2455 break 2456 } 2457 x := v.Args[0] 2458 v.reset(OpAMD64MOVWQZX) 2459 v.AddArg(x) 2460 return true 2461 } 2462 // match: (ANDLconst [c] _) 2463 // cond: int32(c)==0 2464 // result: (MOVLconst [0]) 2465 for { 2466 c := v.AuxInt 2467 if !(int32(c) == 0) { 2468 break 2469 } 2470 v.reset(OpAMD64MOVLconst) 2471 v.AuxInt = 0 2472 return true 2473 } 2474 // match: (ANDLconst [c] x) 2475 // cond: int32(c)==-1 2476 // result: x 2477 for { 2478 c := v.AuxInt 2479 x := v.Args[0] 2480 if !(int32(c) == -1) { 2481 break 2482 } 2483 v.reset(OpCopy) 2484 v.Type = x.Type 2485 v.AddArg(x) 2486 return true 2487 } 2488 // match: (ANDLconst [c] (MOVLconst [d])) 2489 // cond: 2490 // result: (MOVLconst [c&d]) 2491 for { 2492 c := v.AuxInt 2493 v_0 := v.Args[0] 2494 if v_0.Op != OpAMD64MOVLconst { 2495 break 2496 } 2497 d := v_0.AuxInt 2498 v.reset(OpAMD64MOVLconst) 2499 v.AuxInt = c & d 2500 return true 2501 } 2502 return false 2503 } 2504 func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool { 2505 b := v.Block 2506 _ = b 2507 
typ := &b.Func.Config.Types 2508 _ = typ 2509 // match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 2510 // cond: 2511 // result: (ANDL x (MOVLf2i y)) 2512 for { 2513 off := v.AuxInt 2514 sym := v.Aux 2515 _ = v.Args[2] 2516 x := v.Args[0] 2517 ptr := v.Args[1] 2518 v_2 := v.Args[2] 2519 if v_2.Op != OpAMD64MOVSSstore { 2520 break 2521 } 2522 if v_2.AuxInt != off { 2523 break 2524 } 2525 if v_2.Aux != sym { 2526 break 2527 } 2528 _ = v_2.Args[2] 2529 if ptr != v_2.Args[0] { 2530 break 2531 } 2532 y := v_2.Args[1] 2533 v.reset(OpAMD64ANDL) 2534 v.AddArg(x) 2535 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 2536 v0.AddArg(y) 2537 v.AddArg(v0) 2538 return true 2539 } 2540 return false 2541 } 2542 func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { 2543 // match: (ANDQ x (MOVQconst [c])) 2544 // cond: is32Bit(c) 2545 // result: (ANDQconst [c] x) 2546 for { 2547 _ = v.Args[1] 2548 x := v.Args[0] 2549 v_1 := v.Args[1] 2550 if v_1.Op != OpAMD64MOVQconst { 2551 break 2552 } 2553 c := v_1.AuxInt 2554 if !(is32Bit(c)) { 2555 break 2556 } 2557 v.reset(OpAMD64ANDQconst) 2558 v.AuxInt = c 2559 v.AddArg(x) 2560 return true 2561 } 2562 // match: (ANDQ (MOVQconst [c]) x) 2563 // cond: is32Bit(c) 2564 // result: (ANDQconst [c] x) 2565 for { 2566 _ = v.Args[1] 2567 v_0 := v.Args[0] 2568 if v_0.Op != OpAMD64MOVQconst { 2569 break 2570 } 2571 c := v_0.AuxInt 2572 x := v.Args[1] 2573 if !(is32Bit(c)) { 2574 break 2575 } 2576 v.reset(OpAMD64ANDQconst) 2577 v.AuxInt = c 2578 v.AddArg(x) 2579 return true 2580 } 2581 // match: (ANDQ x x) 2582 // cond: 2583 // result: x 2584 for { 2585 _ = v.Args[1] 2586 x := v.Args[0] 2587 if x != v.Args[1] { 2588 break 2589 } 2590 v.reset(OpCopy) 2591 v.Type = x.Type 2592 v.AddArg(x) 2593 return true 2594 } 2595 // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem)) 2596 // cond: canMergeLoad(v, l, x) && clobber(l) 2597 // result: (ANDQmem x [off] {sym} ptr mem) 2598 for { 2599 _ = v.Args[1] 2600 x := v.Args[0] 2601 l := v.Args[1] 2602 if l.Op != OpAMD64MOVQload { 2603 break 2604 } 2605 off := l.AuxInt 2606 sym := l.Aux 2607 _ = l.Args[1] 2608 ptr := l.Args[0] 2609 mem := l.Args[1] 2610 if !(canMergeLoad(v, l, x) && clobber(l)) { 2611 break 2612 } 2613 v.reset(OpAMD64ANDQmem) 2614 v.AuxInt = off 2615 v.Aux = sym 2616 v.AddArg(x) 2617 v.AddArg(ptr) 2618 v.AddArg(mem) 2619 return true 2620 } 2621 // match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x) 2622 // cond: canMergeLoad(v, l, x) && clobber(l) 2623 // result: (ANDQmem x [off] {sym} ptr mem) 2624 for { 2625 _ = v.Args[1] 2626 l := v.Args[0] 2627 if l.Op != OpAMD64MOVQload { 2628 break 2629 } 2630 off := l.AuxInt 2631 sym := l.Aux 2632 _ = l.Args[1] 2633 ptr := l.Args[0] 2634 mem := l.Args[1] 2635 x := v.Args[1] 2636 if !(canMergeLoad(v, l, x) && clobber(l)) { 2637 break 2638 } 2639 v.reset(OpAMD64ANDQmem) 2640 v.AuxInt = off 2641 v.Aux = sym 2642 v.AddArg(x) 2643 v.AddArg(ptr) 2644 v.AddArg(mem) 2645 return true 2646 } 2647 return false 2648 } 2649 func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool { 2650 // match: (ANDQconst [c] (ANDQconst [d] x)) 2651 // cond: 2652 // result: (ANDQconst [c & d] x) 2653 for { 2654 c := v.AuxInt 2655 v_0 := v.Args[0] 2656 if v_0.Op != OpAMD64ANDQconst { 2657 break 2658 } 2659 d := v_0.AuxInt 2660 x := v_0.Args[0] 2661 v.reset(OpAMD64ANDQconst) 2662 v.AuxInt = c & d 2663 v.AddArg(x) 2664 return true 2665 } 2666 // match: (ANDQconst [0xFF] x) 2667 // cond: 2668 // result: (MOVBQZX x) 2669 for { 2670 if v.AuxInt != 0xFF { 2671 break 2672 } 2673 x := v.Args[0] 2674 
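// A mask that keeps exactly the low byte, word, or doubleword is turned
// into the matching zero-extending move (MOVBQZX/MOVWQZX/MOVLQZX below);
// the extension typically encodes shorter than an AND immediate and feeds
// the extension-elimination rules elsewhere in this file.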
v.reset(OpAMD64MOVBQZX) 2675 v.AddArg(x) 2676 return true 2677 } 2678 // match: (ANDQconst [0xFFFF] x) 2679 // cond: 2680 // result: (MOVWQZX x) 2681 for { 2682 if v.AuxInt != 0xFFFF { 2683 break 2684 } 2685 x := v.Args[0] 2686 v.reset(OpAMD64MOVWQZX) 2687 v.AddArg(x) 2688 return true 2689 } 2690 // match: (ANDQconst [0xFFFFFFFF] x) 2691 // cond: 2692 // result: (MOVLQZX x) 2693 for { 2694 if v.AuxInt != 0xFFFFFFFF { 2695 break 2696 } 2697 x := v.Args[0] 2698 v.reset(OpAMD64MOVLQZX) 2699 v.AddArg(x) 2700 return true 2701 } 2702 // match: (ANDQconst [0] _) 2703 // cond: 2704 // result: (MOVQconst [0]) 2705 for { 2706 if v.AuxInt != 0 { 2707 break 2708 } 2709 v.reset(OpAMD64MOVQconst) 2710 v.AuxInt = 0 2711 return true 2712 } 2713 // match: (ANDQconst [-1] x) 2714 // cond: 2715 // result: x 2716 for { 2717 if v.AuxInt != -1 { 2718 break 2719 } 2720 x := v.Args[0] 2721 v.reset(OpCopy) 2722 v.Type = x.Type 2723 v.AddArg(x) 2724 return true 2725 } 2726 // match: (ANDQconst [c] (MOVQconst [d])) 2727 // cond: 2728 // result: (MOVQconst [c&d]) 2729 for { 2730 c := v.AuxInt 2731 v_0 := v.Args[0] 2732 if v_0.Op != OpAMD64MOVQconst { 2733 break 2734 } 2735 d := v_0.AuxInt 2736 v.reset(OpAMD64MOVQconst) 2737 v.AuxInt = c & d 2738 return true 2739 } 2740 return false 2741 } 2742 func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool { 2743 b := v.Block 2744 _ = b 2745 typ := &b.Func.Config.Types 2746 _ = typ 2747 // match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 2748 // cond: 2749 // result: (ANDQ x (MOVQf2i y)) 2750 for { 2751 off := v.AuxInt 2752 sym := v.Aux 2753 _ = v.Args[2] 2754 x := v.Args[0] 2755 ptr := v.Args[1] 2756 v_2 := v.Args[2] 2757 if v_2.Op != OpAMD64MOVSDstore { 2758 break 2759 } 2760 if v_2.AuxInt != off { 2761 break 2762 } 2763 if v_2.Aux != sym { 2764 break 2765 } 2766 _ = v_2.Args[2] 2767 if ptr != v_2.Args[0] { 2768 break 2769 } 2770 y := v_2.Args[1] 2771 v.reset(OpAMD64ANDQ) 2772 v.AddArg(x) 2773 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 2774 v0.AddArg(y) 2775 v.AddArg(v0) 2776 return true 2777 } 2778 return false 2779 } 2780 func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool { 2781 b := v.Block 2782 _ = b 2783 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) 2784 // cond: 2785 // result: (BSFQ (ORQconst <t> [1<<8] x)) 2786 for { 2787 v_0 := v.Args[0] 2788 if v_0.Op != OpAMD64ORQconst { 2789 break 2790 } 2791 t := v_0.Type 2792 if v_0.AuxInt != 1<<8 { 2793 break 2794 } 2795 v_0_0 := v_0.Args[0] 2796 if v_0_0.Op != OpAMD64MOVBQZX { 2797 break 2798 } 2799 x := v_0_0.Args[0] 2800 v.reset(OpAMD64BSFQ) 2801 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) 2802 v0.AuxInt = 1 << 8 2803 v0.AddArg(x) 2804 v.AddArg(v0) 2805 return true 2806 } 2807 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) 2808 // cond: 2809 // result: (BSFQ (ORQconst <t> [1<<16] x)) 2810 for { 2811 v_0 := v.Args[0] 2812 if v_0.Op != OpAMD64ORQconst { 2813 break 2814 } 2815 t := v_0.Type 2816 if v_0.AuxInt != 1<<16 { 2817 break 2818 } 2819 v_0_0 := v_0.Args[0] 2820 if v_0_0.Op != OpAMD64MOVWQZX { 2821 break 2822 } 2823 x := v_0_0.Args[0] 2824 v.reset(OpAMD64BSFQ) 2825 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) 2826 v0.AuxInt = 1 << 16 2827 v0.AddArg(x) 2828 v.AddArg(v0) 2829 return true 2830 } 2831 return false 2832 } 2833 func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool { 2834 // match: (BTQconst [c] x) 2835 // cond: c < 32 2836 // result: (BTLconst [c] x) 2837 for { 2838 c := v.AuxInt 2839 x := v.Args[0] 2840 if !(c < 32) { 2841 break 2842 } 2843 v.reset(OpAMD64BTLconst) 
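// A bit index below 32 only examines the low doubleword, so the 64-bit BTQ
// can be narrowed to BTL, dropping the REX.W prefix from the encoding
// without changing the carry flag result.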
2844 v.AuxInt = c 2845 v.AddArg(x) 2846 return true 2847 } 2848 return false 2849 } 2850 func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool { 2851 // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) 2852 // cond: c != 0 2853 // result: x 2854 for { 2855 _ = v.Args[2] 2856 x := v.Args[0] 2857 v_2 := v.Args[2] 2858 if v_2.Op != OpSelect1 { 2859 break 2860 } 2861 v_2_0 := v_2.Args[0] 2862 if v_2_0.Op != OpAMD64BSFQ { 2863 break 2864 } 2865 v_2_0_0 := v_2_0.Args[0] 2866 if v_2_0_0.Op != OpAMD64ORQconst { 2867 break 2868 } 2869 c := v_2_0_0.AuxInt 2870 if !(c != 0) { 2871 break 2872 } 2873 v.reset(OpCopy) 2874 v.Type = x.Type 2875 v.AddArg(x) 2876 return true 2877 } 2878 return false 2879 } 2880 func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { 2881 b := v.Block 2882 _ = b 2883 // match: (CMPB x (MOVLconst [c])) 2884 // cond: 2885 // result: (CMPBconst x [int64(int8(c))]) 2886 for { 2887 _ = v.Args[1] 2888 x := v.Args[0] 2889 v_1 := v.Args[1] 2890 if v_1.Op != OpAMD64MOVLconst { 2891 break 2892 } 2893 c := v_1.AuxInt 2894 v.reset(OpAMD64CMPBconst) 2895 v.AuxInt = int64(int8(c)) 2896 v.AddArg(x) 2897 return true 2898 } 2899 // match: (CMPB (MOVLconst [c]) x) 2900 // cond: 2901 // result: (InvertFlags (CMPBconst x [int64(int8(c))])) 2902 for { 2903 _ = v.Args[1] 2904 v_0 := v.Args[0] 2905 if v_0.Op != OpAMD64MOVLconst { 2906 break 2907 } 2908 c := v_0.AuxInt 2909 x := v.Args[1] 2910 v.reset(OpAMD64InvertFlags) 2911 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 2912 v0.AuxInt = int64(int8(c)) 2913 v0.AddArg(x) 2914 v.AddArg(v0) 2915 return true 2916 } 2917 return false 2918 } 2919 func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool { 2920 // match: (CMPBconst (MOVLconst [x]) [y]) 2921 // cond: int8(x)==int8(y) 2922 // result: (FlagEQ) 2923 for { 2924 y := v.AuxInt 2925 v_0 := v.Args[0] 2926 if v_0.Op != OpAMD64MOVLconst { 2927 break 2928 } 2929 x := v_0.AuxInt 2930 if !(int8(x) == int8(y)) { 2931 break 2932 } 2933 v.reset(OpAMD64FlagEQ) 2934 return true 2935 } 2936 // match: (CMPBconst (MOVLconst [x]) [y]) 2937 // cond: int8(x)<int8(y) && uint8(x)<uint8(y) 2938 // result: (FlagLT_ULT) 2939 for { 2940 y := v.AuxInt 2941 v_0 := v.Args[0] 2942 if v_0.Op != OpAMD64MOVLconst { 2943 break 2944 } 2945 x := v_0.AuxInt 2946 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) { 2947 break 2948 } 2949 v.reset(OpAMD64FlagLT_ULT) 2950 return true 2951 } 2952 // match: (CMPBconst (MOVLconst [x]) [y]) 2953 // cond: int8(x)<int8(y) && uint8(x)>uint8(y) 2954 // result: (FlagLT_UGT) 2955 for { 2956 y := v.AuxInt 2957 v_0 := v.Args[0] 2958 if v_0.Op != OpAMD64MOVLconst { 2959 break 2960 } 2961 x := v_0.AuxInt 2962 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { 2963 break 2964 } 2965 v.reset(OpAMD64FlagLT_UGT) 2966 return true 2967 } 2968 // match: (CMPBconst (MOVLconst [x]) [y]) 2969 // cond: int8(x)>int8(y) && uint8(x)<uint8(y) 2970 // result: (FlagGT_ULT) 2971 for { 2972 y := v.AuxInt 2973 v_0 := v.Args[0] 2974 if v_0.Op != OpAMD64MOVLconst { 2975 break 2976 } 2977 x := v_0.AuxInt 2978 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { 2979 break 2980 } 2981 v.reset(OpAMD64FlagGT_ULT) 2982 return true 2983 } 2984 // match: (CMPBconst (MOVLconst [x]) [y]) 2985 // cond: int8(x)>int8(y) && uint8(x)>uint8(y) 2986 // result: (FlagGT_UGT) 2987 for { 2988 y := v.AuxInt 2989 v_0 := v.Args[0] 2990 if v_0.Op != OpAMD64MOVLconst { 2991 break 2992 } 2993 x := v_0.AuxInt 2994 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { 2995 break 2996 } 2997 v.reset(OpAMD64FlagGT_UGT) 2998 return true 2999 } 
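// The five flag constants (FlagEQ, FlagLT_ULT, FlagLT_UGT, FlagGT_ULT,
// FlagGT_UGT) capture both the signed and the unsigned ordering of the
// operands, so any later SETcc/Jcc/CMOVcc can be folded regardless of
// which condition it tests.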
3000 // match: (CMPBconst (ANDLconst _ [m]) [n]) 3001 // cond: 0 <= int8(m) && int8(m) < int8(n) 3002 // result: (FlagLT_ULT) 3003 for { 3004 n := v.AuxInt 3005 v_0 := v.Args[0] 3006 if v_0.Op != OpAMD64ANDLconst { 3007 break 3008 } 3009 m := v_0.AuxInt 3010 if !(0 <= int8(m) && int8(m) < int8(n)) { 3011 break 3012 } 3013 v.reset(OpAMD64FlagLT_ULT) 3014 return true 3015 } 3016 // match: (CMPBconst (ANDL x y) [0]) 3017 // cond: 3018 // result: (TESTB x y) 3019 for { 3020 if v.AuxInt != 0 { 3021 break 3022 } 3023 v_0 := v.Args[0] 3024 if v_0.Op != OpAMD64ANDL { 3025 break 3026 } 3027 _ = v_0.Args[1] 3028 x := v_0.Args[0] 3029 y := v_0.Args[1] 3030 v.reset(OpAMD64TESTB) 3031 v.AddArg(x) 3032 v.AddArg(y) 3033 return true 3034 } 3035 // match: (CMPBconst (ANDLconst [c] x) [0]) 3036 // cond: 3037 // result: (TESTBconst [int64(int8(c))] x) 3038 for { 3039 if v.AuxInt != 0 { 3040 break 3041 } 3042 v_0 := v.Args[0] 3043 if v_0.Op != OpAMD64ANDLconst { 3044 break 3045 } 3046 c := v_0.AuxInt 3047 x := v_0.Args[0] 3048 v.reset(OpAMD64TESTBconst) 3049 v.AuxInt = int64(int8(c)) 3050 v.AddArg(x) 3051 return true 3052 } 3053 // match: (CMPBconst x [0]) 3054 // cond: 3055 // result: (TESTB x x) 3056 for { 3057 if v.AuxInt != 0 { 3058 break 3059 } 3060 x := v.Args[0] 3061 v.reset(OpAMD64TESTB) 3062 v.AddArg(x) 3063 v.AddArg(x) 3064 return true 3065 } 3066 return false 3067 } 3068 func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { 3069 b := v.Block 3070 _ = b 3071 // match: (CMPL x (MOVLconst [c])) 3072 // cond: 3073 // result: (CMPLconst x [c]) 3074 for { 3075 _ = v.Args[1] 3076 x := v.Args[0] 3077 v_1 := v.Args[1] 3078 if v_1.Op != OpAMD64MOVLconst { 3079 break 3080 } 3081 c := v_1.AuxInt 3082 v.reset(OpAMD64CMPLconst) 3083 v.AuxInt = c 3084 v.AddArg(x) 3085 return true 3086 } 3087 // match: (CMPL (MOVLconst [c]) x) 3088 // cond: 3089 // result: (InvertFlags (CMPLconst x [c])) 3090 for { 3091 _ = v.Args[1] 3092 v_0 := v.Args[0] 3093 if v_0.Op != OpAMD64MOVLconst { 3094 break 3095 } 3096 c := v_0.AuxInt 3097 x := v.Args[1] 3098 v.reset(OpAMD64InvertFlags) 3099 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 3100 v0.AuxInt = c 3101 v0.AddArg(x) 3102 v.AddArg(v0) 3103 return true 3104 } 3105 return false 3106 } 3107 func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool { 3108 // match: (CMPLconst (MOVLconst [x]) [y]) 3109 // cond: int32(x)==int32(y) 3110 // result: (FlagEQ) 3111 for { 3112 y := v.AuxInt 3113 v_0 := v.Args[0] 3114 if v_0.Op != OpAMD64MOVLconst { 3115 break 3116 } 3117 x := v_0.AuxInt 3118 if !(int32(x) == int32(y)) { 3119 break 3120 } 3121 v.reset(OpAMD64FlagEQ) 3122 return true 3123 } 3124 // match: (CMPLconst (MOVLconst [x]) [y]) 3125 // cond: int32(x)<int32(y) && uint32(x)<uint32(y) 3126 // result: (FlagLT_ULT) 3127 for { 3128 y := v.AuxInt 3129 v_0 := v.Args[0] 3130 if v_0.Op != OpAMD64MOVLconst { 3131 break 3132 } 3133 x := v_0.AuxInt 3134 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { 3135 break 3136 } 3137 v.reset(OpAMD64FlagLT_ULT) 3138 return true 3139 } 3140 // match: (CMPLconst (MOVLconst [x]) [y]) 3141 // cond: int32(x)<int32(y) && uint32(x)>uint32(y) 3142 // result: (FlagLT_UGT) 3143 for { 3144 y := v.AuxInt 3145 v_0 := v.Args[0] 3146 if v_0.Op != OpAMD64MOVLconst { 3147 break 3148 } 3149 x := v_0.AuxInt 3150 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { 3151 break 3152 } 3153 v.reset(OpAMD64FlagLT_UGT) 3154 return true 3155 } 3156 // match: (CMPLconst (MOVLconst [x]) [y]) 3157 // cond: int32(x)>int32(y) && uint32(x)<uint32(y) 3158 // result: 
(FlagGT_ULT) 3159 for { 3160 y := v.AuxInt 3161 v_0 := v.Args[0] 3162 if v_0.Op != OpAMD64MOVLconst { 3163 break 3164 } 3165 x := v_0.AuxInt 3166 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { 3167 break 3168 } 3169 v.reset(OpAMD64FlagGT_ULT) 3170 return true 3171 } 3172 // match: (CMPLconst (MOVLconst [x]) [y]) 3173 // cond: int32(x)>int32(y) && uint32(x)>uint32(y) 3174 // result: (FlagGT_UGT) 3175 for { 3176 y := v.AuxInt 3177 v_0 := v.Args[0] 3178 if v_0.Op != OpAMD64MOVLconst { 3179 break 3180 } 3181 x := v_0.AuxInt 3182 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { 3183 break 3184 } 3185 v.reset(OpAMD64FlagGT_UGT) 3186 return true 3187 } 3188 // match: (CMPLconst (SHRLconst _ [c]) [n]) 3189 // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) 3190 // result: (FlagLT_ULT) 3191 for { 3192 n := v.AuxInt 3193 v_0 := v.Args[0] 3194 if v_0.Op != OpAMD64SHRLconst { 3195 break 3196 } 3197 c := v_0.AuxInt 3198 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) { 3199 break 3200 } 3201 v.reset(OpAMD64FlagLT_ULT) 3202 return true 3203 } 3204 // match: (CMPLconst (ANDLconst _ [m]) [n]) 3205 // cond: 0 <= int32(m) && int32(m) < int32(n) 3206 // result: (FlagLT_ULT) 3207 for { 3208 n := v.AuxInt 3209 v_0 := v.Args[0] 3210 if v_0.Op != OpAMD64ANDLconst { 3211 break 3212 } 3213 m := v_0.AuxInt 3214 if !(0 <= int32(m) && int32(m) < int32(n)) { 3215 break 3216 } 3217 v.reset(OpAMD64FlagLT_ULT) 3218 return true 3219 } 3220 // match: (CMPLconst (ANDL x y) [0]) 3221 // cond: 3222 // result: (TESTL x y) 3223 for { 3224 if v.AuxInt != 0 { 3225 break 3226 } 3227 v_0 := v.Args[0] 3228 if v_0.Op != OpAMD64ANDL { 3229 break 3230 } 3231 _ = v_0.Args[1] 3232 x := v_0.Args[0] 3233 y := v_0.Args[1] 3234 v.reset(OpAMD64TESTL) 3235 v.AddArg(x) 3236 v.AddArg(y) 3237 return true 3238 } 3239 // match: (CMPLconst (ANDLconst [c] x) [0]) 3240 // cond: 3241 // result: (TESTLconst [c] x) 3242 for { 3243 if v.AuxInt != 0 { 3244 break 3245 } 3246 v_0 := v.Args[0] 3247 if v_0.Op != OpAMD64ANDLconst { 3248 break 3249 } 3250 c := v_0.AuxInt 3251 x := v_0.Args[0] 3252 v.reset(OpAMD64TESTLconst) 3253 v.AuxInt = c 3254 v.AddArg(x) 3255 return true 3256 } 3257 // match: (CMPLconst x [0]) 3258 // cond: 3259 // result: (TESTL x x) 3260 for { 3261 if v.AuxInt != 0 { 3262 break 3263 } 3264 x := v.Args[0] 3265 v.reset(OpAMD64TESTL) 3266 v.AddArg(x) 3267 v.AddArg(x) 3268 return true 3269 } 3270 return false 3271 } 3272 func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool { 3273 b := v.Block 3274 _ = b 3275 // match: (CMPQ x (MOVQconst [c])) 3276 // cond: is32Bit(c) 3277 // result: (CMPQconst x [c]) 3278 for { 3279 _ = v.Args[1] 3280 x := v.Args[0] 3281 v_1 := v.Args[1] 3282 if v_1.Op != OpAMD64MOVQconst { 3283 break 3284 } 3285 c := v_1.AuxInt 3286 if !(is32Bit(c)) { 3287 break 3288 } 3289 v.reset(OpAMD64CMPQconst) 3290 v.AuxInt = c 3291 v.AddArg(x) 3292 return true 3293 } 3294 // match: (CMPQ (MOVQconst [c]) x) 3295 // cond: is32Bit(c) 3296 // result: (InvertFlags (CMPQconst x [c])) 3297 for { 3298 _ = v.Args[1] 3299 v_0 := v.Args[0] 3300 if v_0.Op != OpAMD64MOVQconst { 3301 break 3302 } 3303 c := v_0.AuxInt 3304 x := v.Args[1] 3305 if !(is32Bit(c)) { 3306 break 3307 } 3308 v.reset(OpAMD64InvertFlags) 3309 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 3310 v0.AuxInt = c 3311 v0.AddArg(x) 3312 v.AddArg(v0) 3313 return true 3314 } 3315 return false 3316 } 3317 func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool { 3318 // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] 
_))) [32]) 3319 // cond: 3320 // result: (FlagLT_ULT) 3321 for { 3322 if v.AuxInt != 32 { 3323 break 3324 } 3325 v_0 := v.Args[0] 3326 if v_0.Op != OpAMD64NEGQ { 3327 break 3328 } 3329 v_0_0 := v_0.Args[0] 3330 if v_0_0.Op != OpAMD64ADDQconst { 3331 break 3332 } 3333 if v_0_0.AuxInt != -16 { 3334 break 3335 } 3336 v_0_0_0 := v_0_0.Args[0] 3337 if v_0_0_0.Op != OpAMD64ANDQconst { 3338 break 3339 } 3340 if v_0_0_0.AuxInt != 15 { 3341 break 3342 } 3343 v.reset(OpAMD64FlagLT_ULT) 3344 return true 3345 } 3346 // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) 3347 // cond: 3348 // result: (FlagLT_ULT) 3349 for { 3350 if v.AuxInt != 32 { 3351 break 3352 } 3353 v_0 := v.Args[0] 3354 if v_0.Op != OpAMD64NEGQ { 3355 break 3356 } 3357 v_0_0 := v_0.Args[0] 3358 if v_0_0.Op != OpAMD64ADDQconst { 3359 break 3360 } 3361 if v_0_0.AuxInt != -8 { 3362 break 3363 } 3364 v_0_0_0 := v_0_0.Args[0] 3365 if v_0_0_0.Op != OpAMD64ANDQconst { 3366 break 3367 } 3368 if v_0_0_0.AuxInt != 7 { 3369 break 3370 } 3371 v.reset(OpAMD64FlagLT_ULT) 3372 return true 3373 } 3374 // match: (CMPQconst (MOVQconst [x]) [y]) 3375 // cond: x==y 3376 // result: (FlagEQ) 3377 for { 3378 y := v.AuxInt 3379 v_0 := v.Args[0] 3380 if v_0.Op != OpAMD64MOVQconst { 3381 break 3382 } 3383 x := v_0.AuxInt 3384 if !(x == y) { 3385 break 3386 } 3387 v.reset(OpAMD64FlagEQ) 3388 return true 3389 } 3390 // match: (CMPQconst (MOVQconst [x]) [y]) 3391 // cond: x<y && uint64(x)<uint64(y) 3392 // result: (FlagLT_ULT) 3393 for { 3394 y := v.AuxInt 3395 v_0 := v.Args[0] 3396 if v_0.Op != OpAMD64MOVQconst { 3397 break 3398 } 3399 x := v_0.AuxInt 3400 if !(x < y && uint64(x) < uint64(y)) { 3401 break 3402 } 3403 v.reset(OpAMD64FlagLT_ULT) 3404 return true 3405 } 3406 // match: (CMPQconst (MOVQconst [x]) [y]) 3407 // cond: x<y && uint64(x)>uint64(y) 3408 // result: (FlagLT_UGT) 3409 for { 3410 y := v.AuxInt 3411 v_0 := v.Args[0] 3412 if v_0.Op != OpAMD64MOVQconst { 3413 break 3414 } 3415 x := v_0.AuxInt 3416 if !(x < y && uint64(x) > uint64(y)) { 3417 break 3418 } 3419 v.reset(OpAMD64FlagLT_UGT) 3420 return true 3421 } 3422 // match: (CMPQconst (MOVQconst [x]) [y]) 3423 // cond: x>y && uint64(x)<uint64(y) 3424 // result: (FlagGT_ULT) 3425 for { 3426 y := v.AuxInt 3427 v_0 := v.Args[0] 3428 if v_0.Op != OpAMD64MOVQconst { 3429 break 3430 } 3431 x := v_0.AuxInt 3432 if !(x > y && uint64(x) < uint64(y)) { 3433 break 3434 } 3435 v.reset(OpAMD64FlagGT_ULT) 3436 return true 3437 } 3438 // match: (CMPQconst (MOVQconst [x]) [y]) 3439 // cond: x>y && uint64(x)>uint64(y) 3440 // result: (FlagGT_UGT) 3441 for { 3442 y := v.AuxInt 3443 v_0 := v.Args[0] 3444 if v_0.Op != OpAMD64MOVQconst { 3445 break 3446 } 3447 x := v_0.AuxInt 3448 if !(x > y && uint64(x) > uint64(y)) { 3449 break 3450 } 3451 v.reset(OpAMD64FlagGT_UGT) 3452 return true 3453 } 3454 // match: (CMPQconst (MOVBQZX _) [c]) 3455 // cond: 0xFF < c 3456 // result: (FlagLT_ULT) 3457 for { 3458 c := v.AuxInt 3459 v_0 := v.Args[0] 3460 if v_0.Op != OpAMD64MOVBQZX { 3461 break 3462 } 3463 if !(0xFF < c) { 3464 break 3465 } 3466 v.reset(OpAMD64FlagLT_ULT) 3467 return true 3468 } 3469 // match: (CMPQconst (MOVWQZX _) [c]) 3470 // cond: 0xFFFF < c 3471 // result: (FlagLT_ULT) 3472 for { 3473 c := v.AuxInt 3474 v_0 := v.Args[0] 3475 if v_0.Op != OpAMD64MOVWQZX { 3476 break 3477 } 3478 if !(0xFFFF < c) { 3479 break 3480 } 3481 v.reset(OpAMD64FlagLT_ULT) 3482 return true 3483 } 3484 // match: (CMPQconst (MOVLQZX _) [c]) 3485 // cond: 0xFFFFFFFF < c 3486 // result: (FlagLT_ULT) 3487 for { 3488 c := 
v.AuxInt 3489 v_0 := v.Args[0] 3490 if v_0.Op != OpAMD64MOVLQZX { 3491 break 3492 } 3493 if !(0xFFFFFFFF < c) { 3494 break 3495 } 3496 v.reset(OpAMD64FlagLT_ULT) 3497 return true 3498 } 3499 return false 3500 } 3501 func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool { 3502 // match: (CMPQconst (SHRQconst _ [c]) [n]) 3503 // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) 3504 // result: (FlagLT_ULT) 3505 for { 3506 n := v.AuxInt 3507 v_0 := v.Args[0] 3508 if v_0.Op != OpAMD64SHRQconst { 3509 break 3510 } 3511 c := v_0.AuxInt 3512 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) { 3513 break 3514 } 3515 v.reset(OpAMD64FlagLT_ULT) 3516 return true 3517 } 3518 // match: (CMPQconst (ANDQconst _ [m]) [n]) 3519 // cond: 0 <= m && m < n 3520 // result: (FlagLT_ULT) 3521 for { 3522 n := v.AuxInt 3523 v_0 := v.Args[0] 3524 if v_0.Op != OpAMD64ANDQconst { 3525 break 3526 } 3527 m := v_0.AuxInt 3528 if !(0 <= m && m < n) { 3529 break 3530 } 3531 v.reset(OpAMD64FlagLT_ULT) 3532 return true 3533 } 3534 // match: (CMPQconst (ANDLconst _ [m]) [n]) 3535 // cond: 0 <= m && m < n 3536 // result: (FlagLT_ULT) 3537 for { 3538 n := v.AuxInt 3539 v_0 := v.Args[0] 3540 if v_0.Op != OpAMD64ANDLconst { 3541 break 3542 } 3543 m := v_0.AuxInt 3544 if !(0 <= m && m < n) { 3545 break 3546 } 3547 v.reset(OpAMD64FlagLT_ULT) 3548 return true 3549 } 3550 // match: (CMPQconst (ANDQ x y) [0]) 3551 // cond: 3552 // result: (TESTQ x y) 3553 for { 3554 if v.AuxInt != 0 { 3555 break 3556 } 3557 v_0 := v.Args[0] 3558 if v_0.Op != OpAMD64ANDQ { 3559 break 3560 } 3561 _ = v_0.Args[1] 3562 x := v_0.Args[0] 3563 y := v_0.Args[1] 3564 v.reset(OpAMD64TESTQ) 3565 v.AddArg(x) 3566 v.AddArg(y) 3567 return true 3568 } 3569 // match: (CMPQconst (ANDQconst [c] x) [0]) 3570 // cond: 3571 // result: (TESTQconst [c] x) 3572 for { 3573 if v.AuxInt != 0 { 3574 break 3575 } 3576 v_0 := v.Args[0] 3577 if v_0.Op != OpAMD64ANDQconst { 3578 break 3579 } 3580 c := v_0.AuxInt 3581 x := v_0.Args[0] 3582 v.reset(OpAMD64TESTQconst) 3583 v.AuxInt = c 3584 v.AddArg(x) 3585 return true 3586 } 3587 // match: (CMPQconst x [0]) 3588 // cond: 3589 // result: (TESTQ x x) 3590 for { 3591 if v.AuxInt != 0 { 3592 break 3593 } 3594 x := v.Args[0] 3595 v.reset(OpAMD64TESTQ) 3596 v.AddArg(x) 3597 v.AddArg(x) 3598 return true 3599 } 3600 return false 3601 } 3602 func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { 3603 b := v.Block 3604 _ = b 3605 // match: (CMPW x (MOVLconst [c])) 3606 // cond: 3607 // result: (CMPWconst x [int64(int16(c))]) 3608 for { 3609 _ = v.Args[1] 3610 x := v.Args[0] 3611 v_1 := v.Args[1] 3612 if v_1.Op != OpAMD64MOVLconst { 3613 break 3614 } 3615 c := v_1.AuxInt 3616 v.reset(OpAMD64CMPWconst) 3617 v.AuxInt = int64(int16(c)) 3618 v.AddArg(x) 3619 return true 3620 } 3621 // match: (CMPW (MOVLconst [c]) x) 3622 // cond: 3623 // result: (InvertFlags (CMPWconst x [int64(int16(c))])) 3624 for { 3625 _ = v.Args[1] 3626 v_0 := v.Args[0] 3627 if v_0.Op != OpAMD64MOVLconst { 3628 break 3629 } 3630 c := v_0.AuxInt 3631 x := v.Args[1] 3632 v.reset(OpAMD64InvertFlags) 3633 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 3634 v0.AuxInt = int64(int16(c)) 3635 v0.AddArg(x) 3636 v.AddArg(v0) 3637 return true 3638 } 3639 return false 3640 } 3641 func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool { 3642 // match: (CMPWconst (MOVLconst [x]) [y]) 3643 // cond: int16(x)==int16(y) 3644 // result: (FlagEQ) 3645 for { 3646 y := v.AuxInt 3647 v_0 := v.Args[0] 3648 if v_0.Op != OpAMD64MOVLconst { 3649 break 
3650 } 3651 x := v_0.AuxInt 3652 if !(int16(x) == int16(y)) { 3653 break 3654 } 3655 v.reset(OpAMD64FlagEQ) 3656 return true 3657 } 3658 // match: (CMPWconst (MOVLconst [x]) [y]) 3659 // cond: int16(x)<int16(y) && uint16(x)<uint16(y) 3660 // result: (FlagLT_ULT) 3661 for { 3662 y := v.AuxInt 3663 v_0 := v.Args[0] 3664 if v_0.Op != OpAMD64MOVLconst { 3665 break 3666 } 3667 x := v_0.AuxInt 3668 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) { 3669 break 3670 } 3671 v.reset(OpAMD64FlagLT_ULT) 3672 return true 3673 } 3674 // match: (CMPWconst (MOVLconst [x]) [y]) 3675 // cond: int16(x)<int16(y) && uint16(x)>uint16(y) 3676 // result: (FlagLT_UGT) 3677 for { 3678 y := v.AuxInt 3679 v_0 := v.Args[0] 3680 if v_0.Op != OpAMD64MOVLconst { 3681 break 3682 } 3683 x := v_0.AuxInt 3684 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { 3685 break 3686 } 3687 v.reset(OpAMD64FlagLT_UGT) 3688 return true 3689 } 3690 // match: (CMPWconst (MOVLconst [x]) [y]) 3691 // cond: int16(x)>int16(y) && uint16(x)<uint16(y) 3692 // result: (FlagGT_ULT) 3693 for { 3694 y := v.AuxInt 3695 v_0 := v.Args[0] 3696 if v_0.Op != OpAMD64MOVLconst { 3697 break 3698 } 3699 x := v_0.AuxInt 3700 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) { 3701 break 3702 } 3703 v.reset(OpAMD64FlagGT_ULT) 3704 return true 3705 } 3706 // match: (CMPWconst (MOVLconst [x]) [y]) 3707 // cond: int16(x)>int16(y) && uint16(x)>uint16(y) 3708 // result: (FlagGT_UGT) 3709 for { 3710 y := v.AuxInt 3711 v_0 := v.Args[0] 3712 if v_0.Op != OpAMD64MOVLconst { 3713 break 3714 } 3715 x := v_0.AuxInt 3716 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { 3717 break 3718 } 3719 v.reset(OpAMD64FlagGT_UGT) 3720 return true 3721 } 3722 // match: (CMPWconst (ANDLconst _ [m]) [n]) 3723 // cond: 0 <= int16(m) && int16(m) < int16(n) 3724 // result: (FlagLT_ULT) 3725 for { 3726 n := v.AuxInt 3727 v_0 := v.Args[0] 3728 if v_0.Op != OpAMD64ANDLconst { 3729 break 3730 } 3731 m := v_0.AuxInt 3732 if !(0 <= int16(m) && int16(m) < int16(n)) { 3733 break 3734 } 3735 v.reset(OpAMD64FlagLT_ULT) 3736 return true 3737 } 3738 // match: (CMPWconst (ANDL x y) [0]) 3739 // cond: 3740 // result: (TESTW x y) 3741 for { 3742 if v.AuxInt != 0 { 3743 break 3744 } 3745 v_0 := v.Args[0] 3746 if v_0.Op != OpAMD64ANDL { 3747 break 3748 } 3749 _ = v_0.Args[1] 3750 x := v_0.Args[0] 3751 y := v_0.Args[1] 3752 v.reset(OpAMD64TESTW) 3753 v.AddArg(x) 3754 v.AddArg(y) 3755 return true 3756 } 3757 // match: (CMPWconst (ANDLconst [c] x) [0]) 3758 // cond: 3759 // result: (TESTWconst [int64(int16(c))] x) 3760 for { 3761 if v.AuxInt != 0 { 3762 break 3763 } 3764 v_0 := v.Args[0] 3765 if v_0.Op != OpAMD64ANDLconst { 3766 break 3767 } 3768 c := v_0.AuxInt 3769 x := v_0.Args[0] 3770 v.reset(OpAMD64TESTWconst) 3771 v.AuxInt = int64(int16(c)) 3772 v.AddArg(x) 3773 return true 3774 } 3775 // match: (CMPWconst x [0]) 3776 // cond: 3777 // result: (TESTW x x) 3778 for { 3779 if v.AuxInt != 0 { 3780 break 3781 } 3782 x := v.Args[0] 3783 v.reset(OpAMD64TESTW) 3784 v.AddArg(x) 3785 v.AddArg(x) 3786 return true 3787 } 3788 return false 3789 } 3790 func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool { 3791 // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 3792 // cond: is32Bit(off1+off2) 3793 // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) 3794 for { 3795 off1 := v.AuxInt 3796 sym := v.Aux 3797 _ = v.Args[3] 3798 v_0 := v.Args[0] 3799 if v_0.Op != OpAMD64ADDQconst { 3800 break 3801 } 3802 off2 := v_0.AuxInt 3803 ptr := v_0.Args[0] 3804 old := v.Args[1] 3805 
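// The ADDQconst feeding the pointer folds into the instruction's
// displacement as long as the combined offset still fits the signed 32-bit
// displacement field (is32Bit); the same fold recurs for nearly every
// memory operation in this file.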
new_ := v.Args[2] 3806 mem := v.Args[3] 3807 if !(is32Bit(off1 + off2)) { 3808 break 3809 } 3810 v.reset(OpAMD64CMPXCHGLlock) 3811 v.AuxInt = off1 + off2 3812 v.Aux = sym 3813 v.AddArg(ptr) 3814 v.AddArg(old) 3815 v.AddArg(new_) 3816 v.AddArg(mem) 3817 return true 3818 } 3819 return false 3820 } 3821 func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool { 3822 // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) 3823 // cond: is32Bit(off1+off2) 3824 // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) 3825 for { 3826 off1 := v.AuxInt 3827 sym := v.Aux 3828 _ = v.Args[3] 3829 v_0 := v.Args[0] 3830 if v_0.Op != OpAMD64ADDQconst { 3831 break 3832 } 3833 off2 := v_0.AuxInt 3834 ptr := v_0.Args[0] 3835 old := v.Args[1] 3836 new_ := v.Args[2] 3837 mem := v.Args[3] 3838 if !(is32Bit(off1 + off2)) { 3839 break 3840 } 3841 v.reset(OpAMD64CMPXCHGQlock) 3842 v.AuxInt = off1 + off2 3843 v.Aux = sym 3844 v.AddArg(ptr) 3845 v.AddArg(old) 3846 v.AddArg(new_) 3847 v.AddArg(mem) 3848 return true 3849 } 3850 return false 3851 } 3852 func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool { 3853 // match: (LEAL [c] {s} (ADDLconst [d] x)) 3854 // cond: is32Bit(c+d) 3855 // result: (LEAL [c+d] {s} x) 3856 for { 3857 c := v.AuxInt 3858 s := v.Aux 3859 v_0 := v.Args[0] 3860 if v_0.Op != OpAMD64ADDLconst { 3861 break 3862 } 3863 d := v_0.AuxInt 3864 x := v_0.Args[0] 3865 if !(is32Bit(c + d)) { 3866 break 3867 } 3868 v.reset(OpAMD64LEAL) 3869 v.AuxInt = c + d 3870 v.Aux = s 3871 v.AddArg(x) 3872 return true 3873 } 3874 return false 3875 } 3876 func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool { 3877 // match: (LEAQ [c] {s} (ADDQconst [d] x)) 3878 // cond: is32Bit(c+d) 3879 // result: (LEAQ [c+d] {s} x) 3880 for { 3881 c := v.AuxInt 3882 s := v.Aux 3883 v_0 := v.Args[0] 3884 if v_0.Op != OpAMD64ADDQconst { 3885 break 3886 } 3887 d := v_0.AuxInt 3888 x := v_0.Args[0] 3889 if !(is32Bit(c + d)) { 3890 break 3891 } 3892 v.reset(OpAMD64LEAQ) 3893 v.AuxInt = c + d 3894 v.Aux = s 3895 v.AddArg(x) 3896 return true 3897 } 3898 // match: (LEAQ [c] {s} (ADDQ x y)) 3899 // cond: x.Op != OpSB && y.Op != OpSB 3900 // result: (LEAQ1 [c] {s} x y) 3901 for { 3902 c := v.AuxInt 3903 s := v.Aux 3904 v_0 := v.Args[0] 3905 if v_0.Op != OpAMD64ADDQ { 3906 break 3907 } 3908 _ = v_0.Args[1] 3909 x := v_0.Args[0] 3910 y := v_0.Args[1] 3911 if !(x.Op != OpSB && y.Op != OpSB) { 3912 break 3913 } 3914 v.reset(OpAMD64LEAQ1) 3915 v.AuxInt = c 3916 v.Aux = s 3917 v.AddArg(x) 3918 v.AddArg(y) 3919 return true 3920 } 3921 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) 3922 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3923 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) 3924 for { 3925 off1 := v.AuxInt 3926 sym1 := v.Aux 3927 v_0 := v.Args[0] 3928 if v_0.Op != OpAMD64LEAQ { 3929 break 3930 } 3931 off2 := v_0.AuxInt 3932 sym2 := v_0.Aux 3933 x := v_0.Args[0] 3934 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3935 break 3936 } 3937 v.reset(OpAMD64LEAQ) 3938 v.AuxInt = off1 + off2 3939 v.Aux = mergeSym(sym1, sym2) 3940 v.AddArg(x) 3941 return true 3942 } 3943 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) 3944 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3945 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 3946 for { 3947 off1 := v.AuxInt 3948 sym1 := v.Aux 3949 v_0 := v.Args[0] 3950 if v_0.Op != OpAMD64LEAQ1 { 3951 break 3952 } 3953 off2 := v_0.AuxInt 3954 sym2 := v_0.Aux 3955 _ = v_0.Args[1] 3956 x := v_0.Args[0] 3957 y := v_0.Args[1] 3958 if 
!(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3959 break 3960 } 3961 v.reset(OpAMD64LEAQ1) 3962 v.AuxInt = off1 + off2 3963 v.Aux = mergeSym(sym1, sym2) 3964 v.AddArg(x) 3965 v.AddArg(y) 3966 return true 3967 } 3968 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) 3969 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3970 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 3971 for { 3972 off1 := v.AuxInt 3973 sym1 := v.Aux 3974 v_0 := v.Args[0] 3975 if v_0.Op != OpAMD64LEAQ2 { 3976 break 3977 } 3978 off2 := v_0.AuxInt 3979 sym2 := v_0.Aux 3980 _ = v_0.Args[1] 3981 x := v_0.Args[0] 3982 y := v_0.Args[1] 3983 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 3984 break 3985 } 3986 v.reset(OpAMD64LEAQ2) 3987 v.AuxInt = off1 + off2 3988 v.Aux = mergeSym(sym1, sym2) 3989 v.AddArg(x) 3990 v.AddArg(y) 3991 return true 3992 } 3993 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) 3994 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 3995 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 3996 for { 3997 off1 := v.AuxInt 3998 sym1 := v.Aux 3999 v_0 := v.Args[0] 4000 if v_0.Op != OpAMD64LEAQ4 { 4001 break 4002 } 4003 off2 := v_0.AuxInt 4004 sym2 := v_0.Aux 4005 _ = v_0.Args[1] 4006 x := v_0.Args[0] 4007 y := v_0.Args[1] 4008 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4009 break 4010 } 4011 v.reset(OpAMD64LEAQ4) 4012 v.AuxInt = off1 + off2 4013 v.Aux = mergeSym(sym1, sym2) 4014 v.AddArg(x) 4015 v.AddArg(y) 4016 return true 4017 } 4018 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) 4019 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4020 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 4021 for { 4022 off1 := v.AuxInt 4023 sym1 := v.Aux 4024 v_0 := v.Args[0] 4025 if v_0.Op != OpAMD64LEAQ8 { 4026 break 4027 } 4028 off2 := v_0.AuxInt 4029 sym2 := v_0.Aux 4030 _ = v_0.Args[1] 4031 x := v_0.Args[0] 4032 y := v_0.Args[1] 4033 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4034 break 4035 } 4036 v.reset(OpAMD64LEAQ8) 4037 v.AuxInt = off1 + off2 4038 v.Aux = mergeSym(sym1, sym2) 4039 v.AddArg(x) 4040 v.AddArg(y) 4041 return true 4042 } 4043 return false 4044 } 4045 func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool { 4046 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y) 4047 // cond: is32Bit(c+d) && x.Op != OpSB 4048 // result: (LEAQ1 [c+d] {s} x y) 4049 for { 4050 c := v.AuxInt 4051 s := v.Aux 4052 _ = v.Args[1] 4053 v_0 := v.Args[0] 4054 if v_0.Op != OpAMD64ADDQconst { 4055 break 4056 } 4057 d := v_0.AuxInt 4058 x := v_0.Args[0] 4059 y := v.Args[1] 4060 if !(is32Bit(c+d) && x.Op != OpSB) { 4061 break 4062 } 4063 v.reset(OpAMD64LEAQ1) 4064 v.AuxInt = c + d 4065 v.Aux = s 4066 v.AddArg(x) 4067 v.AddArg(y) 4068 return true 4069 } 4070 // match: (LEAQ1 [c] {s} y (ADDQconst [d] x)) 4071 // cond: is32Bit(c+d) && x.Op != OpSB 4072 // result: (LEAQ1 [c+d] {s} x y) 4073 for { 4074 c := v.AuxInt 4075 s := v.Aux 4076 _ = v.Args[1] 4077 y := v.Args[0] 4078 v_1 := v.Args[1] 4079 if v_1.Op != OpAMD64ADDQconst { 4080 break 4081 } 4082 d := v_1.AuxInt 4083 x := v_1.Args[0] 4084 if !(is32Bit(c+d) && x.Op != OpSB) { 4085 break 4086 } 4087 v.reset(OpAMD64LEAQ1) 4088 v.AuxInt = c + d 4089 v.Aux = s 4090 v.AddArg(x) 4091 v.AddArg(y) 4092 return true 4093 } 4094 // match: (LEAQ1 [c] {s} x (SHLQconst [1] y)) 4095 // cond: 4096 // result: (LEAQ2 [c] {s} x y) 4097 for { 4098 c := v.AuxInt 4099 s := v.Aux 4100 _ = v.Args[1] 4101 x := v.Args[0] 4102 v_1 := v.Args[1] 4103 if v_1.Op != OpAMD64SHLQconst { 4104 break 4105 } 4106 if v_1.AuxInt != 1 
{ 4107 break 4108 } 4109 y := v_1.Args[0] 4110 v.reset(OpAMD64LEAQ2) 4111 v.AuxInt = c 4112 v.Aux = s 4113 v.AddArg(x) 4114 v.AddArg(y) 4115 return true 4116 } 4117 // match: (LEAQ1 [c] {s} (SHLQconst [1] y) x) 4118 // cond: 4119 // result: (LEAQ2 [c] {s} x y) 4120 for { 4121 c := v.AuxInt 4122 s := v.Aux 4123 _ = v.Args[1] 4124 v_0 := v.Args[0] 4125 if v_0.Op != OpAMD64SHLQconst { 4126 break 4127 } 4128 if v_0.AuxInt != 1 { 4129 break 4130 } 4131 y := v_0.Args[0] 4132 x := v.Args[1] 4133 v.reset(OpAMD64LEAQ2) 4134 v.AuxInt = c 4135 v.Aux = s 4136 v.AddArg(x) 4137 v.AddArg(y) 4138 return true 4139 } 4140 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) 4141 // cond: 4142 // result: (LEAQ4 [c] {s} x y) 4143 for { 4144 c := v.AuxInt 4145 s := v.Aux 4146 _ = v.Args[1] 4147 x := v.Args[0] 4148 v_1 := v.Args[1] 4149 if v_1.Op != OpAMD64SHLQconst { 4150 break 4151 } 4152 if v_1.AuxInt != 2 { 4153 break 4154 } 4155 y := v_1.Args[0] 4156 v.reset(OpAMD64LEAQ4) 4157 v.AuxInt = c 4158 v.Aux = s 4159 v.AddArg(x) 4160 v.AddArg(y) 4161 return true 4162 } 4163 // match: (LEAQ1 [c] {s} (SHLQconst [2] y) x) 4164 // cond: 4165 // result: (LEAQ4 [c] {s} x y) 4166 for { 4167 c := v.AuxInt 4168 s := v.Aux 4169 _ = v.Args[1] 4170 v_0 := v.Args[0] 4171 if v_0.Op != OpAMD64SHLQconst { 4172 break 4173 } 4174 if v_0.AuxInt != 2 { 4175 break 4176 } 4177 y := v_0.Args[0] 4178 x := v.Args[1] 4179 v.reset(OpAMD64LEAQ4) 4180 v.AuxInt = c 4181 v.Aux = s 4182 v.AddArg(x) 4183 v.AddArg(y) 4184 return true 4185 } 4186 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) 4187 // cond: 4188 // result: (LEAQ8 [c] {s} x y) 4189 for { 4190 c := v.AuxInt 4191 s := v.Aux 4192 _ = v.Args[1] 4193 x := v.Args[0] 4194 v_1 := v.Args[1] 4195 if v_1.Op != OpAMD64SHLQconst { 4196 break 4197 } 4198 if v_1.AuxInt != 3 { 4199 break 4200 } 4201 y := v_1.Args[0] 4202 v.reset(OpAMD64LEAQ8) 4203 v.AuxInt = c 4204 v.Aux = s 4205 v.AddArg(x) 4206 v.AddArg(y) 4207 return true 4208 } 4209 // match: (LEAQ1 [c] {s} (SHLQconst [3] y) x) 4210 // cond: 4211 // result: (LEAQ8 [c] {s} x y) 4212 for { 4213 c := v.AuxInt 4214 s := v.Aux 4215 _ = v.Args[1] 4216 v_0 := v.Args[0] 4217 if v_0.Op != OpAMD64SHLQconst { 4218 break 4219 } 4220 if v_0.AuxInt != 3 { 4221 break 4222 } 4223 y := v_0.Args[0] 4224 x := v.Args[1] 4225 v.reset(OpAMD64LEAQ8) 4226 v.AuxInt = c 4227 v.Aux = s 4228 v.AddArg(x) 4229 v.AddArg(y) 4230 return true 4231 } 4232 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4233 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4234 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 4235 for { 4236 off1 := v.AuxInt 4237 sym1 := v.Aux 4238 _ = v.Args[1] 4239 v_0 := v.Args[0] 4240 if v_0.Op != OpAMD64LEAQ { 4241 break 4242 } 4243 off2 := v_0.AuxInt 4244 sym2 := v_0.Aux 4245 x := v_0.Args[0] 4246 y := v.Args[1] 4247 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4248 break 4249 } 4250 v.reset(OpAMD64LEAQ1) 4251 v.AuxInt = off1 + off2 4252 v.Aux = mergeSym(sym1, sym2) 4253 v.AddArg(x) 4254 v.AddArg(y) 4255 return true 4256 } 4257 // match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x)) 4258 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4259 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) 4260 for { 4261 off1 := v.AuxInt 4262 sym1 := v.Aux 4263 _ = v.Args[1] 4264 y := v.Args[0] 4265 v_1 := v.Args[1] 4266 if v_1.Op != OpAMD64LEAQ { 4267 break 4268 } 4269 off2 := v_1.AuxInt 4270 sym2 := v_1.Aux 4271 x := v_1.Args[0] 4272 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && 
x.Op != OpSB) { 4273 break 4274 } 4275 v.reset(OpAMD64LEAQ1) 4276 v.AuxInt = off1 + off2 4277 v.Aux = mergeSym(sym1, sym2) 4278 v.AddArg(x) 4279 v.AddArg(y) 4280 return true 4281 } 4282 return false 4283 } 4284 func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool { 4285 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y) 4286 // cond: is32Bit(c+d) && x.Op != OpSB 4287 // result: (LEAQ2 [c+d] {s} x y) 4288 for { 4289 c := v.AuxInt 4290 s := v.Aux 4291 _ = v.Args[1] 4292 v_0 := v.Args[0] 4293 if v_0.Op != OpAMD64ADDQconst { 4294 break 4295 } 4296 d := v_0.AuxInt 4297 x := v_0.Args[0] 4298 y := v.Args[1] 4299 if !(is32Bit(c+d) && x.Op != OpSB) { 4300 break 4301 } 4302 v.reset(OpAMD64LEAQ2) 4303 v.AuxInt = c + d 4304 v.Aux = s 4305 v.AddArg(x) 4306 v.AddArg(y) 4307 return true 4308 } 4309 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y)) 4310 // cond: is32Bit(c+2*d) && y.Op != OpSB 4311 // result: (LEAQ2 [c+2*d] {s} x y) 4312 for { 4313 c := v.AuxInt 4314 s := v.Aux 4315 _ = v.Args[1] 4316 x := v.Args[0] 4317 v_1 := v.Args[1] 4318 if v_1.Op != OpAMD64ADDQconst { 4319 break 4320 } 4321 d := v_1.AuxInt 4322 y := v_1.Args[0] 4323 if !(is32Bit(c+2*d) && y.Op != OpSB) { 4324 break 4325 } 4326 v.reset(OpAMD64LEAQ2) 4327 v.AuxInt = c + 2*d 4328 v.Aux = s 4329 v.AddArg(x) 4330 v.AddArg(y) 4331 return true 4332 } 4333 // match: (LEAQ2 [c] {s} x (SHLQconst [1] y)) 4334 // cond: 4335 // result: (LEAQ4 [c] {s} x y) 4336 for { 4337 c := v.AuxInt 4338 s := v.Aux 4339 _ = v.Args[1] 4340 x := v.Args[0] 4341 v_1 := v.Args[1] 4342 if v_1.Op != OpAMD64SHLQconst { 4343 break 4344 } 4345 if v_1.AuxInt != 1 { 4346 break 4347 } 4348 y := v_1.Args[0] 4349 v.reset(OpAMD64LEAQ4) 4350 v.AuxInt = c 4351 v.Aux = s 4352 v.AddArg(x) 4353 v.AddArg(y) 4354 return true 4355 } 4356 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y)) 4357 // cond: 4358 // result: (LEAQ8 [c] {s} x y) 4359 for { 4360 c := v.AuxInt 4361 s := v.Aux 4362 _ = v.Args[1] 4363 x := v.Args[0] 4364 v_1 := v.Args[1] 4365 if v_1.Op != OpAMD64SHLQconst { 4366 break 4367 } 4368 if v_1.AuxInt != 2 { 4369 break 4370 } 4371 y := v_1.Args[0] 4372 v.reset(OpAMD64LEAQ8) 4373 v.AuxInt = c 4374 v.Aux = s 4375 v.AddArg(x) 4376 v.AddArg(y) 4377 return true 4378 } 4379 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4380 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4381 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) 4382 for { 4383 off1 := v.AuxInt 4384 sym1 := v.Aux 4385 _ = v.Args[1] 4386 v_0 := v.Args[0] 4387 if v_0.Op != OpAMD64LEAQ { 4388 break 4389 } 4390 off2 := v_0.AuxInt 4391 sym2 := v_0.Aux 4392 x := v_0.Args[0] 4393 y := v.Args[1] 4394 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4395 break 4396 } 4397 v.reset(OpAMD64LEAQ2) 4398 v.AuxInt = off1 + off2 4399 v.Aux = mergeSym(sym1, sym2) 4400 v.AddArg(x) 4401 v.AddArg(y) 4402 return true 4403 } 4404 return false 4405 } 4406 func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool { 4407 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y) 4408 // cond: is32Bit(c+d) && x.Op != OpSB 4409 // result: (LEAQ4 [c+d] {s} x y) 4410 for { 4411 c := v.AuxInt 4412 s := v.Aux 4413 _ = v.Args[1] 4414 v_0 := v.Args[0] 4415 if v_0.Op != OpAMD64ADDQconst { 4416 break 4417 } 4418 d := v_0.AuxInt 4419 x := v_0.Args[0] 4420 y := v.Args[1] 4421 if !(is32Bit(c+d) && x.Op != OpSB) { 4422 break 4423 } 4424 v.reset(OpAMD64LEAQ4) 4425 v.AuxInt = c + d 4426 v.Aux = s 4427 v.AddArg(x) 4428 v.AddArg(y) 4429 return true 4430 } 4431 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y)) 4432 // cond: 
is32Bit(c+4*d) && y.Op != OpSB 4433 // result: (LEAQ4 [c+4*d] {s} x y) 4434 for { 4435 c := v.AuxInt 4436 s := v.Aux 4437 _ = v.Args[1] 4438 x := v.Args[0] 4439 v_1 := v.Args[1] 4440 if v_1.Op != OpAMD64ADDQconst { 4441 break 4442 } 4443 d := v_1.AuxInt 4444 y := v_1.Args[0] 4445 if !(is32Bit(c+4*d) && y.Op != OpSB) { 4446 break 4447 } 4448 v.reset(OpAMD64LEAQ4) 4449 v.AuxInt = c + 4*d 4450 v.Aux = s 4451 v.AddArg(x) 4452 v.AddArg(y) 4453 return true 4454 } 4455 // match: (LEAQ4 [c] {s} x (SHLQconst [1] y)) 4456 // cond: 4457 // result: (LEAQ8 [c] {s} x y) 4458 for { 4459 c := v.AuxInt 4460 s := v.Aux 4461 _ = v.Args[1] 4462 x := v.Args[0] 4463 v_1 := v.Args[1] 4464 if v_1.Op != OpAMD64SHLQconst { 4465 break 4466 } 4467 if v_1.AuxInt != 1 { 4468 break 4469 } 4470 y := v_1.Args[0] 4471 v.reset(OpAMD64LEAQ8) 4472 v.AuxInt = c 4473 v.Aux = s 4474 v.AddArg(x) 4475 v.AddArg(y) 4476 return true 4477 } 4478 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4479 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4480 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) 4481 for { 4482 off1 := v.AuxInt 4483 sym1 := v.Aux 4484 _ = v.Args[1] 4485 v_0 := v.Args[0] 4486 if v_0.Op != OpAMD64LEAQ { 4487 break 4488 } 4489 off2 := v_0.AuxInt 4490 sym2 := v_0.Aux 4491 x := v_0.Args[0] 4492 y := v.Args[1] 4493 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4494 break 4495 } 4496 v.reset(OpAMD64LEAQ4) 4497 v.AuxInt = off1 + off2 4498 v.Aux = mergeSym(sym1, sym2) 4499 v.AddArg(x) 4500 v.AddArg(y) 4501 return true 4502 } 4503 return false 4504 } 4505 func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool { 4506 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y) 4507 // cond: is32Bit(c+d) && x.Op != OpSB 4508 // result: (LEAQ8 [c+d] {s} x y) 4509 for { 4510 c := v.AuxInt 4511 s := v.Aux 4512 _ = v.Args[1] 4513 v_0 := v.Args[0] 4514 if v_0.Op != OpAMD64ADDQconst { 4515 break 4516 } 4517 d := v_0.AuxInt 4518 x := v_0.Args[0] 4519 y := v.Args[1] 4520 if !(is32Bit(c+d) && x.Op != OpSB) { 4521 break 4522 } 4523 v.reset(OpAMD64LEAQ8) 4524 v.AuxInt = c + d 4525 v.Aux = s 4526 v.AddArg(x) 4527 v.AddArg(y) 4528 return true 4529 } 4530 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y)) 4531 // cond: is32Bit(c+8*d) && y.Op != OpSB 4532 // result: (LEAQ8 [c+8*d] {s} x y) 4533 for { 4534 c := v.AuxInt 4535 s := v.Aux 4536 _ = v.Args[1] 4537 x := v.Args[0] 4538 v_1 := v.Args[1] 4539 if v_1.Op != OpAMD64ADDQconst { 4540 break 4541 } 4542 d := v_1.AuxInt 4543 y := v_1.Args[0] 4544 if !(is32Bit(c+8*d) && y.Op != OpSB) { 4545 break 4546 } 4547 v.reset(OpAMD64LEAQ8) 4548 v.AuxInt = c + 8*d 4549 v.Aux = s 4550 v.AddArg(x) 4551 v.AddArg(y) 4552 return true 4553 } 4554 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) 4555 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB 4556 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) 4557 for { 4558 off1 := v.AuxInt 4559 sym1 := v.Aux 4560 _ = v.Args[1] 4561 v_0 := v.Args[0] 4562 if v_0.Op != OpAMD64LEAQ { 4563 break 4564 } 4565 off2 := v_0.AuxInt 4566 sym2 := v_0.Aux 4567 x := v_0.Args[0] 4568 y := v.Args[1] 4569 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { 4570 break 4571 } 4572 v.reset(OpAMD64LEAQ8) 4573 v.AuxInt = off1 + off2 4574 v.Aux = mergeSym(sym1, sym2) 4575 v.AddArg(x) 4576 v.AddArg(y) 4577 return true 4578 } 4579 return false 4580 } 4581 func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { 4582 b := v.Block 4583 _ = b 4584 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) 
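// (the load and the sign-extension fuse into one MOVBQSXload; "@x.Block"
// in the result places the fused value in the load's block, where its
// memory argument is known to be live)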
4585 // cond: x.Uses == 1 && clobber(x) 4586 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4587 for { 4588 x := v.Args[0] 4589 if x.Op != OpAMD64MOVBload { 4590 break 4591 } 4592 off := x.AuxInt 4593 sym := x.Aux 4594 _ = x.Args[1] 4595 ptr := x.Args[0] 4596 mem := x.Args[1] 4597 if !(x.Uses == 1 && clobber(x)) { 4598 break 4599 } 4600 b = x.Block 4601 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4602 v.reset(OpCopy) 4603 v.AddArg(v0) 4604 v0.AuxInt = off 4605 v0.Aux = sym 4606 v0.AddArg(ptr) 4607 v0.AddArg(mem) 4608 return true 4609 } 4610 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) 4611 // cond: x.Uses == 1 && clobber(x) 4612 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4613 for { 4614 x := v.Args[0] 4615 if x.Op != OpAMD64MOVWload { 4616 break 4617 } 4618 off := x.AuxInt 4619 sym := x.Aux 4620 _ = x.Args[1] 4621 ptr := x.Args[0] 4622 mem := x.Args[1] 4623 if !(x.Uses == 1 && clobber(x)) { 4624 break 4625 } 4626 b = x.Block 4627 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4628 v.reset(OpCopy) 4629 v.AddArg(v0) 4630 v0.AuxInt = off 4631 v0.Aux = sym 4632 v0.AddArg(ptr) 4633 v0.AddArg(mem) 4634 return true 4635 } 4636 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) 4637 // cond: x.Uses == 1 && clobber(x) 4638 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4639 for { 4640 x := v.Args[0] 4641 if x.Op != OpAMD64MOVLload { 4642 break 4643 } 4644 off := x.AuxInt 4645 sym := x.Aux 4646 _ = x.Args[1] 4647 ptr := x.Args[0] 4648 mem := x.Args[1] 4649 if !(x.Uses == 1 && clobber(x)) { 4650 break 4651 } 4652 b = x.Block 4653 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4654 v.reset(OpCopy) 4655 v.AddArg(v0) 4656 v0.AuxInt = off 4657 v0.Aux = sym 4658 v0.AddArg(ptr) 4659 v0.AddArg(mem) 4660 return true 4661 } 4662 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) 4663 // cond: x.Uses == 1 && clobber(x) 4664 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem) 4665 for { 4666 x := v.Args[0] 4667 if x.Op != OpAMD64MOVQload { 4668 break 4669 } 4670 off := x.AuxInt 4671 sym := x.Aux 4672 _ = x.Args[1] 4673 ptr := x.Args[0] 4674 mem := x.Args[1] 4675 if !(x.Uses == 1 && clobber(x)) { 4676 break 4677 } 4678 b = x.Block 4679 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type) 4680 v.reset(OpCopy) 4681 v.AddArg(v0) 4682 v0.AuxInt = off 4683 v0.Aux = sym 4684 v0.AddArg(ptr) 4685 v0.AddArg(mem) 4686 return true 4687 } 4688 // match: (MOVBQSX (ANDLconst [c] x)) 4689 // cond: c & 0x80 == 0 4690 // result: (ANDLconst [c & 0x7f] x) 4691 for { 4692 v_0 := v.Args[0] 4693 if v_0.Op != OpAMD64ANDLconst { 4694 break 4695 } 4696 c := v_0.AuxInt 4697 x := v_0.Args[0] 4698 if !(c&0x80 == 0) { 4699 break 4700 } 4701 v.reset(OpAMD64ANDLconst) 4702 v.AuxInt = c & 0x7f 4703 v.AddArg(x) 4704 return true 4705 } 4706 // match: (MOVBQSX (MOVBQSX x)) 4707 // cond: 4708 // result: (MOVBQSX x) 4709 for { 4710 v_0 := v.Args[0] 4711 if v_0.Op != OpAMD64MOVBQSX { 4712 break 4713 } 4714 x := v_0.Args[0] 4715 v.reset(OpAMD64MOVBQSX) 4716 v.AddArg(x) 4717 return true 4718 } 4719 return false 4720 } 4721 func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool { 4722 // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 4723 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4724 // result: (MOVBQSX x) 4725 for { 4726 off := v.AuxInt 4727 sym := v.Aux 4728 _ = v.Args[1] 4729 ptr := v.Args[0] 4730 v_1 := v.Args[1] 4731 if v_1.Op != OpAMD64MOVBstore { 4732 break 4733 } 4734 off2 := v_1.AuxInt 4735 sym2 := 
v_1.Aux 4736 _ = v_1.Args[2] 4737 ptr2 := v_1.Args[0] 4738 x := v_1.Args[1] 4739 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4740 break 4741 } 4742 v.reset(OpAMD64MOVBQSX) 4743 v.AddArg(x) 4744 return true 4745 } 4746 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4747 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4748 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4749 for { 4750 off1 := v.AuxInt 4751 sym1 := v.Aux 4752 _ = v.Args[1] 4753 v_0 := v.Args[0] 4754 if v_0.Op != OpAMD64LEAQ { 4755 break 4756 } 4757 off2 := v_0.AuxInt 4758 sym2 := v_0.Aux 4759 base := v_0.Args[0] 4760 mem := v.Args[1] 4761 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 4762 break 4763 } 4764 v.reset(OpAMD64MOVBQSXload) 4765 v.AuxInt = off1 + off2 4766 v.Aux = mergeSym(sym1, sym2) 4767 v.AddArg(base) 4768 v.AddArg(mem) 4769 return true 4770 } 4771 return false 4772 } 4773 func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { 4774 b := v.Block 4775 _ = b 4776 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) 4777 // cond: x.Uses == 1 && clobber(x) 4778 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4779 for { 4780 x := v.Args[0] 4781 if x.Op != OpAMD64MOVBload { 4782 break 4783 } 4784 off := x.AuxInt 4785 sym := x.Aux 4786 _ = x.Args[1] 4787 ptr := x.Args[0] 4788 mem := x.Args[1] 4789 if !(x.Uses == 1 && clobber(x)) { 4790 break 4791 } 4792 b = x.Block 4793 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4794 v.reset(OpCopy) 4795 v.AddArg(v0) 4796 v0.AuxInt = off 4797 v0.Aux = sym 4798 v0.AddArg(ptr) 4799 v0.AddArg(mem) 4800 return true 4801 } 4802 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) 4803 // cond: x.Uses == 1 && clobber(x) 4804 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4805 for { 4806 x := v.Args[0] 4807 if x.Op != OpAMD64MOVWload { 4808 break 4809 } 4810 off := x.AuxInt 4811 sym := x.Aux 4812 _ = x.Args[1] 4813 ptr := x.Args[0] 4814 mem := x.Args[1] 4815 if !(x.Uses == 1 && clobber(x)) { 4816 break 4817 } 4818 b = x.Block 4819 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4820 v.reset(OpCopy) 4821 v.AddArg(v0) 4822 v0.AuxInt = off 4823 v0.Aux = sym 4824 v0.AddArg(ptr) 4825 v0.AddArg(mem) 4826 return true 4827 } 4828 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) 4829 // cond: x.Uses == 1 && clobber(x) 4830 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4831 for { 4832 x := v.Args[0] 4833 if x.Op != OpAMD64MOVLload { 4834 break 4835 } 4836 off := x.AuxInt 4837 sym := x.Aux 4838 _ = x.Args[1] 4839 ptr := x.Args[0] 4840 mem := x.Args[1] 4841 if !(x.Uses == 1 && clobber(x)) { 4842 break 4843 } 4844 b = x.Block 4845 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4846 v.reset(OpCopy) 4847 v.AddArg(v0) 4848 v0.AuxInt = off 4849 v0.Aux = sym 4850 v0.AddArg(ptr) 4851 v0.AddArg(mem) 4852 return true 4853 } 4854 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) 4855 // cond: x.Uses == 1 && clobber(x) 4856 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) 4857 for { 4858 x := v.Args[0] 4859 if x.Op != OpAMD64MOVQload { 4860 break 4861 } 4862 off := x.AuxInt 4863 sym := x.Aux 4864 _ = x.Args[1] 4865 ptr := x.Args[0] 4866 mem := x.Args[1] 4867 if !(x.Uses == 1 && clobber(x)) { 4868 break 4869 } 4870 b = x.Block 4871 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type) 4872 v.reset(OpCopy) 4873 v.AddArg(v0) 4874 v0.AuxInt = off 4875 v0.Aux = sym 4876 v0.AddArg(ptr) 4877 v0.AddArg(mem) 4878 return true 4879 } 4880 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr 
idx mem)) 4881 // cond: x.Uses == 1 && clobber(x) 4882 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem) 4883 for { 4884 x := v.Args[0] 4885 if x.Op != OpAMD64MOVBloadidx1 { 4886 break 4887 } 4888 off := x.AuxInt 4889 sym := x.Aux 4890 _ = x.Args[2] 4891 ptr := x.Args[0] 4892 idx := x.Args[1] 4893 mem := x.Args[2] 4894 if !(x.Uses == 1 && clobber(x)) { 4895 break 4896 } 4897 b = x.Block 4898 v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type) 4899 v.reset(OpCopy) 4900 v.AddArg(v0) 4901 v0.AuxInt = off 4902 v0.Aux = sym 4903 v0.AddArg(ptr) 4904 v0.AddArg(idx) 4905 v0.AddArg(mem) 4906 return true 4907 } 4908 // match: (MOVBQZX (ANDLconst [c] x)) 4909 // cond: 4910 // result: (ANDLconst [c & 0xff] x) 4911 for { 4912 v_0 := v.Args[0] 4913 if v_0.Op != OpAMD64ANDLconst { 4914 break 4915 } 4916 c := v_0.AuxInt 4917 x := v_0.Args[0] 4918 v.reset(OpAMD64ANDLconst) 4919 v.AuxInt = c & 0xff 4920 v.AddArg(x) 4921 return true 4922 } 4923 // match: (MOVBQZX (MOVBQZX x)) 4924 // cond: 4925 // result: (MOVBQZX x) 4926 for { 4927 v_0 := v.Args[0] 4928 if v_0.Op != OpAMD64MOVBQZX { 4929 break 4930 } 4931 x := v_0.Args[0] 4932 v.reset(OpAMD64MOVBQZX) 4933 v.AddArg(x) 4934 return true 4935 } 4936 return false 4937 } 4938 func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { 4939 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) 4940 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 4941 // result: (MOVBQZX x) 4942 for { 4943 off := v.AuxInt 4944 sym := v.Aux 4945 _ = v.Args[1] 4946 ptr := v.Args[0] 4947 v_1 := v.Args[1] 4948 if v_1.Op != OpAMD64MOVBstore { 4949 break 4950 } 4951 off2 := v_1.AuxInt 4952 sym2 := v_1.Aux 4953 _ = v_1.Args[2] 4954 ptr2 := v_1.Args[0] 4955 x := v_1.Args[1] 4956 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 4957 break 4958 } 4959 v.reset(OpAMD64MOVBQZX) 4960 v.AddArg(x) 4961 return true 4962 } 4963 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) 4964 // cond: is32Bit(off1+off2) 4965 // result: (MOVBload [off1+off2] {sym} ptr mem) 4966 for { 4967 off1 := v.AuxInt 4968 sym := v.Aux 4969 _ = v.Args[1] 4970 v_0 := v.Args[0] 4971 if v_0.Op != OpAMD64ADDQconst { 4972 break 4973 } 4974 off2 := v_0.AuxInt 4975 ptr := v_0.Args[0] 4976 mem := v.Args[1] 4977 if !(is32Bit(off1 + off2)) { 4978 break 4979 } 4980 v.reset(OpAMD64MOVBload) 4981 v.AuxInt = off1 + off2 4982 v.Aux = sym 4983 v.AddArg(ptr) 4984 v.AddArg(mem) 4985 return true 4986 } 4987 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 4988 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 4989 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 4990 for { 4991 off1 := v.AuxInt 4992 sym1 := v.Aux 4993 _ = v.Args[1] 4994 v_0 := v.Args[0] 4995 if v_0.Op != OpAMD64LEAQ { 4996 break 4997 } 4998 off2 := v_0.AuxInt 4999 sym2 := v_0.Aux 5000 base := v_0.Args[0] 5001 mem := v.Args[1] 5002 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5003 break 5004 } 5005 v.reset(OpAMD64MOVBload) 5006 v.AuxInt = off1 + off2 5007 v.Aux = mergeSym(sym1, sym2) 5008 v.AddArg(base) 5009 v.AddArg(mem) 5010 return true 5011 } 5012 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 5013 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5014 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 5015 for { 5016 off1 := v.AuxInt 5017 sym1 := v.Aux 5018 _ = v.Args[1] 5019 v_0 := v.Args[0] 5020 if v_0.Op != OpAMD64LEAQ1 { 5021 break 5022 } 5023 off2 := v_0.AuxInt 5024 sym2 := v_0.Aux 5025 _ = v_0.Args[1] 5026 
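// An LEAQ1 address feeding a load is folded into the indexed addressing mode,
// letting the separate address computation die. Reconstructed from the
// match/cond/result comments above, the generating rule in gen/AMD64.rules
// plausibly reads (the "->" form is an assumption about the rules syntax):
//
//	(MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
//		&& is32Bit(off1+off2) && canMergeSym(sym1, sym2)
//		-> (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)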
ptr := v_0.Args[0] 5027 idx := v_0.Args[1] 5028 mem := v.Args[1] 5029 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5030 break 5031 } 5032 v.reset(OpAMD64MOVBloadidx1) 5033 v.AuxInt = off1 + off2 5034 v.Aux = mergeSym(sym1, sym2) 5035 v.AddArg(ptr) 5036 v.AddArg(idx) 5037 v.AddArg(mem) 5038 return true 5039 } 5040 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) 5041 // cond: ptr.Op != OpSB 5042 // result: (MOVBloadidx1 [off] {sym} ptr idx mem) 5043 for { 5044 off := v.AuxInt 5045 sym := v.Aux 5046 _ = v.Args[1] 5047 v_0 := v.Args[0] 5048 if v_0.Op != OpAMD64ADDQ { 5049 break 5050 } 5051 _ = v_0.Args[1] 5052 ptr := v_0.Args[0] 5053 idx := v_0.Args[1] 5054 mem := v.Args[1] 5055 if !(ptr.Op != OpSB) { 5056 break 5057 } 5058 v.reset(OpAMD64MOVBloadidx1) 5059 v.AuxInt = off 5060 v.Aux = sym 5061 v.AddArg(ptr) 5062 v.AddArg(idx) 5063 v.AddArg(mem) 5064 return true 5065 } 5066 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 5067 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 5068 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) 5069 for { 5070 off1 := v.AuxInt 5071 sym1 := v.Aux 5072 _ = v.Args[1] 5073 v_0 := v.Args[0] 5074 if v_0.Op != OpAMD64LEAL { 5075 break 5076 } 5077 off2 := v_0.AuxInt 5078 sym2 := v_0.Aux 5079 base := v_0.Args[0] 5080 mem := v.Args[1] 5081 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 5082 break 5083 } 5084 v.reset(OpAMD64MOVBload) 5085 v.AuxInt = off1 + off2 5086 v.Aux = mergeSym(sym1, sym2) 5087 v.AddArg(base) 5088 v.AddArg(mem) 5089 return true 5090 } 5091 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) 5092 // cond: is32Bit(off1+off2) 5093 // result: (MOVBload [off1+off2] {sym} ptr mem) 5094 for { 5095 off1 := v.AuxInt 5096 sym := v.Aux 5097 _ = v.Args[1] 5098 v_0 := v.Args[0] 5099 if v_0.Op != OpAMD64ADDLconst { 5100 break 5101 } 5102 off2 := v_0.AuxInt 5103 ptr := v_0.Args[0] 5104 mem := v.Args[1] 5105 if !(is32Bit(off1 + off2)) { 5106 break 5107 } 5108 v.reset(OpAMD64MOVBload) 5109 v.AuxInt = off1 + off2 5110 v.Aux = sym 5111 v.AddArg(ptr) 5112 v.AddArg(mem) 5113 return true 5114 } 5115 return false 5116 } 5117 func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { 5118 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 5119 // cond: is32Bit(c+d) 5120 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 5121 for { 5122 c := v.AuxInt 5123 sym := v.Aux 5124 _ = v.Args[2] 5125 v_0 := v.Args[0] 5126 if v_0.Op != OpAMD64ADDQconst { 5127 break 5128 } 5129 d := v_0.AuxInt 5130 ptr := v_0.Args[0] 5131 idx := v.Args[1] 5132 mem := v.Args[2] 5133 if !(is32Bit(c + d)) { 5134 break 5135 } 5136 v.reset(OpAMD64MOVBloadidx1) 5137 v.AuxInt = c + d 5138 v.Aux = sym 5139 v.AddArg(ptr) 5140 v.AddArg(idx) 5141 v.AddArg(mem) 5142 return true 5143 } 5144 // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 5145 // cond: is32Bit(c+d) 5146 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 5147 for { 5148 c := v.AuxInt 5149 sym := v.Aux 5150 _ = v.Args[2] 5151 idx := v.Args[0] 5152 v_1 := v.Args[1] 5153 if v_1.Op != OpAMD64ADDQconst { 5154 break 5155 } 5156 d := v_1.AuxInt 5157 ptr := v_1.Args[0] 5158 mem := v.Args[2] 5159 if !(is32Bit(c + d)) { 5160 break 5161 } 5162 v.reset(OpAMD64MOVBloadidx1) 5163 v.AuxInt = c + d 5164 v.Aux = sym 5165 v.AddArg(ptr) 5166 v.AddArg(idx) 5167 v.AddArg(mem) 5168 return true 5169 } 5170 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 5171 // cond: is32Bit(c+d) 5172 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 5173 for { 5174 c := v.AuxInt 
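// ADDQ is commutative, so this function carries four variants of the same
// constant-offset fold, covering the (ADDQconst ptr)/idx, idx/(ADDQconst ptr),
// ptr/(ADDQconst idx), and (ADDQconst idx)/ptr operand orders. Each is the
// same rewrite; a sketch reconstructed from the comments above:
//
//	(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d)
//		-> (MOVBloadidx1 [c+d] {sym} ptr idx mem)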
5175 sym := v.Aux 5176 _ = v.Args[2] 5177 ptr := v.Args[0] 5178 v_1 := v.Args[1] 5179 if v_1.Op != OpAMD64ADDQconst { 5180 break 5181 } 5182 d := v_1.AuxInt 5183 idx := v_1.Args[0] 5184 mem := v.Args[2] 5185 if !(is32Bit(c + d)) { 5186 break 5187 } 5188 v.reset(OpAMD64MOVBloadidx1) 5189 v.AuxInt = c + d 5190 v.Aux = sym 5191 v.AddArg(ptr) 5192 v.AddArg(idx) 5193 v.AddArg(mem) 5194 return true 5195 } 5196 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 5197 // cond: is32Bit(c+d) 5198 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) 5199 for { 5200 c := v.AuxInt 5201 sym := v.Aux 5202 _ = v.Args[2] 5203 v_0 := v.Args[0] 5204 if v_0.Op != OpAMD64ADDQconst { 5205 break 5206 } 5207 d := v_0.AuxInt 5208 idx := v_0.Args[0] 5209 ptr := v.Args[1] 5210 mem := v.Args[2] 5211 if !(is32Bit(c + d)) { 5212 break 5213 } 5214 v.reset(OpAMD64MOVBloadidx1) 5215 v.AuxInt = c + d 5216 v.Aux = sym 5217 v.AddArg(ptr) 5218 v.AddArg(idx) 5219 v.AddArg(mem) 5220 return true 5221 } 5222 return false 5223 } 5224 func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { 5225 // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem) 5226 // cond: y.Uses == 1 5227 // result: (SETLmem [off] {sym} ptr x mem) 5228 for { 5229 off := v.AuxInt 5230 sym := v.Aux 5231 _ = v.Args[2] 5232 ptr := v.Args[0] 5233 y := v.Args[1] 5234 if y.Op != OpAMD64SETL { 5235 break 5236 } 5237 x := y.Args[0] 5238 mem := v.Args[2] 5239 if !(y.Uses == 1) { 5240 break 5241 } 5242 v.reset(OpAMD64SETLmem) 5243 v.AuxInt = off 5244 v.Aux = sym 5245 v.AddArg(ptr) 5246 v.AddArg(x) 5247 v.AddArg(mem) 5248 return true 5249 } 5250 // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem) 5251 // cond: y.Uses == 1 5252 // result: (SETLEmem [off] {sym} ptr x mem) 5253 for { 5254 off := v.AuxInt 5255 sym := v.Aux 5256 _ = v.Args[2] 5257 ptr := v.Args[0] 5258 y := v.Args[1] 5259 if y.Op != OpAMD64SETLE { 5260 break 5261 } 5262 x := y.Args[0] 5263 mem := v.Args[2] 5264 if !(y.Uses == 1) { 5265 break 5266 } 5267 v.reset(OpAMD64SETLEmem) 5268 v.AuxInt = off 5269 v.Aux = sym 5270 v.AddArg(ptr) 5271 v.AddArg(x) 5272 v.AddArg(mem) 5273 return true 5274 } 5275 // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem) 5276 // cond: y.Uses == 1 5277 // result: (SETGmem [off] {sym} ptr x mem) 5278 for { 5279 off := v.AuxInt 5280 sym := v.Aux 5281 _ = v.Args[2] 5282 ptr := v.Args[0] 5283 y := v.Args[1] 5284 if y.Op != OpAMD64SETG { 5285 break 5286 } 5287 x := y.Args[0] 5288 mem := v.Args[2] 5289 if !(y.Uses == 1) { 5290 break 5291 } 5292 v.reset(OpAMD64SETGmem) 5293 v.AuxInt = off 5294 v.Aux = sym 5295 v.AddArg(ptr) 5296 v.AddArg(x) 5297 v.AddArg(mem) 5298 return true 5299 } 5300 // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem) 5301 // cond: y.Uses == 1 5302 // result: (SETGEmem [off] {sym} ptr x mem) 5303 for { 5304 off := v.AuxInt 5305 sym := v.Aux 5306 _ = v.Args[2] 5307 ptr := v.Args[0] 5308 y := v.Args[1] 5309 if y.Op != OpAMD64SETGE { 5310 break 5311 } 5312 x := y.Args[0] 5313 mem := v.Args[2] 5314 if !(y.Uses == 1) { 5315 break 5316 } 5317 v.reset(OpAMD64SETGEmem) 5318 v.AuxInt = off 5319 v.Aux = sym 5320 v.AddArg(ptr) 5321 v.AddArg(x) 5322 v.AddArg(mem) 5323 return true 5324 } 5325 // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) 5326 // cond: y.Uses == 1 5327 // result: (SETEQmem [off] {sym} ptr x mem) 5328 for { 5329 off := v.AuxInt 5330 sym := v.Aux 5331 _ = v.Args[2] 5332 ptr := v.Args[0] 5333 y := v.Args[1] 5334 if y.Op != OpAMD64SETEQ { 5335 break 5336 } 5337 x := y.Args[0] 5338 mem := v.Args[2] 5339 if !(y.Uses == 1) { 5340 break 5341 } 5342 
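// Each SETcc rule in this function fuses a flag materialization with the byte
// store that consumes it: instead of SETEQ into a register byte followed by a
// MOVBstore, the combined SETEQmem writes the flag result straight to memory.
// The y.Uses == 1 condition ensures the SETcc value has no other consumers.
// Presumed gen/AMD64.rules form, reconstructed from the comments above:
//
//	(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1
//		-> (SETEQmem [off] {sym} ptr x mem)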
v.reset(OpAMD64SETEQmem) 5343 v.AuxInt = off 5344 v.Aux = sym 5345 v.AddArg(ptr) 5346 v.AddArg(x) 5347 v.AddArg(mem) 5348 return true 5349 } 5350 // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem) 5351 // cond: y.Uses == 1 5352 // result: (SETNEmem [off] {sym} ptr x mem) 5353 for { 5354 off := v.AuxInt 5355 sym := v.Aux 5356 _ = v.Args[2] 5357 ptr := v.Args[0] 5358 y := v.Args[1] 5359 if y.Op != OpAMD64SETNE { 5360 break 5361 } 5362 x := y.Args[0] 5363 mem := v.Args[2] 5364 if !(y.Uses == 1) { 5365 break 5366 } 5367 v.reset(OpAMD64SETNEmem) 5368 v.AuxInt = off 5369 v.Aux = sym 5370 v.AddArg(ptr) 5371 v.AddArg(x) 5372 v.AddArg(mem) 5373 return true 5374 } 5375 // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem) 5376 // cond: y.Uses == 1 5377 // result: (SETBmem [off] {sym} ptr x mem) 5378 for { 5379 off := v.AuxInt 5380 sym := v.Aux 5381 _ = v.Args[2] 5382 ptr := v.Args[0] 5383 y := v.Args[1] 5384 if y.Op != OpAMD64SETB { 5385 break 5386 } 5387 x := y.Args[0] 5388 mem := v.Args[2] 5389 if !(y.Uses == 1) { 5390 break 5391 } 5392 v.reset(OpAMD64SETBmem) 5393 v.AuxInt = off 5394 v.Aux = sym 5395 v.AddArg(ptr) 5396 v.AddArg(x) 5397 v.AddArg(mem) 5398 return true 5399 } 5400 // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem) 5401 // cond: y.Uses == 1 5402 // result: (SETBEmem [off] {sym} ptr x mem) 5403 for { 5404 off := v.AuxInt 5405 sym := v.Aux 5406 _ = v.Args[2] 5407 ptr := v.Args[0] 5408 y := v.Args[1] 5409 if y.Op != OpAMD64SETBE { 5410 break 5411 } 5412 x := y.Args[0] 5413 mem := v.Args[2] 5414 if !(y.Uses == 1) { 5415 break 5416 } 5417 v.reset(OpAMD64SETBEmem) 5418 v.AuxInt = off 5419 v.Aux = sym 5420 v.AddArg(ptr) 5421 v.AddArg(x) 5422 v.AddArg(mem) 5423 return true 5424 } 5425 // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem) 5426 // cond: y.Uses == 1 5427 // result: (SETAmem [off] {sym} ptr x mem) 5428 for { 5429 off := v.AuxInt 5430 sym := v.Aux 5431 _ = v.Args[2] 5432 ptr := v.Args[0] 5433 y := v.Args[1] 5434 if y.Op != OpAMD64SETA { 5435 break 5436 } 5437 x := y.Args[0] 5438 mem := v.Args[2] 5439 if !(y.Uses == 1) { 5440 break 5441 } 5442 v.reset(OpAMD64SETAmem) 5443 v.AuxInt = off 5444 v.Aux = sym 5445 v.AddArg(ptr) 5446 v.AddArg(x) 5447 v.AddArg(mem) 5448 return true 5449 } 5450 // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem) 5451 // cond: y.Uses == 1 5452 // result: (SETAEmem [off] {sym} ptr x mem) 5453 for { 5454 off := v.AuxInt 5455 sym := v.Aux 5456 _ = v.Args[2] 5457 ptr := v.Args[0] 5458 y := v.Args[1] 5459 if y.Op != OpAMD64SETAE { 5460 break 5461 } 5462 x := y.Args[0] 5463 mem := v.Args[2] 5464 if !(y.Uses == 1) { 5465 break 5466 } 5467 v.reset(OpAMD64SETAEmem) 5468 v.AuxInt = off 5469 v.Aux = sym 5470 v.AddArg(ptr) 5471 v.AddArg(x) 5472 v.AddArg(mem) 5473 return true 5474 } 5475 return false 5476 } 5477 func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { 5478 b := v.Block 5479 _ = b 5480 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) 5481 // cond: 5482 // result: (MOVBstore [off] {sym} ptr x mem) 5483 for { 5484 off := v.AuxInt 5485 sym := v.Aux 5486 _ = v.Args[2] 5487 ptr := v.Args[0] 5488 v_1 := v.Args[1] 5489 if v_1.Op != OpAMD64MOVBQSX { 5490 break 5491 } 5492 x := v_1.Args[0] 5493 mem := v.Args[2] 5494 v.reset(OpAMD64MOVBstore) 5495 v.AuxInt = off 5496 v.Aux = sym 5497 v.AddArg(ptr) 5498 v.AddArg(x) 5499 v.AddArg(mem) 5500 return true 5501 } 5502 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) 5503 // cond: 5504 // result: (MOVBstore [off] {sym} ptr x mem) 5505 for { 5506 off := v.AuxInt 5507 sym := v.Aux 5508 _ = v.Args[2] 5509 
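// A byte store writes only the low 8 bits of its value, so a sign or zero
// extension feeding it is dead and can be peeled off unconditionally.
// Presumed rules form, reconstructed from the comments above:
//
//	(MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
//		-> (MOVBstore [off] {sym} ptr x mem)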
ptr := v.Args[0] 5510 v_1 := v.Args[1] 5511 if v_1.Op != OpAMD64MOVBQZX { 5512 break 5513 } 5514 x := v_1.Args[0] 5515 mem := v.Args[2] 5516 v.reset(OpAMD64MOVBstore) 5517 v.AuxInt = off 5518 v.Aux = sym 5519 v.AddArg(ptr) 5520 v.AddArg(x) 5521 v.AddArg(mem) 5522 return true 5523 } 5524 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) 5525 // cond: is32Bit(off1+off2) 5526 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 5527 for { 5528 off1 := v.AuxInt 5529 sym := v.Aux 5530 _ = v.Args[2] 5531 v_0 := v.Args[0] 5532 if v_0.Op != OpAMD64ADDQconst { 5533 break 5534 } 5535 off2 := v_0.AuxInt 5536 ptr := v_0.Args[0] 5537 val := v.Args[1] 5538 mem := v.Args[2] 5539 if !(is32Bit(off1 + off2)) { 5540 break 5541 } 5542 v.reset(OpAMD64MOVBstore) 5543 v.AuxInt = off1 + off2 5544 v.Aux = sym 5545 v.AddArg(ptr) 5546 v.AddArg(val) 5547 v.AddArg(mem) 5548 return true 5549 } 5550 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) 5551 // cond: validOff(off) 5552 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) 5553 for { 5554 off := v.AuxInt 5555 sym := v.Aux 5556 _ = v.Args[2] 5557 ptr := v.Args[0] 5558 v_1 := v.Args[1] 5559 if v_1.Op != OpAMD64MOVLconst { 5560 break 5561 } 5562 c := v_1.AuxInt 5563 mem := v.Args[2] 5564 if !(validOff(off)) { 5565 break 5566 } 5567 v.reset(OpAMD64MOVBstoreconst) 5568 v.AuxInt = makeValAndOff(int64(int8(c)), off) 5569 v.Aux = sym 5570 v.AddArg(ptr) 5571 v.AddArg(mem) 5572 return true 5573 } 5574 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) 5575 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5576 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 5577 for { 5578 off1 := v.AuxInt 5579 sym1 := v.Aux 5580 _ = v.Args[2] 5581 v_0 := v.Args[0] 5582 if v_0.Op != OpAMD64LEAQ { 5583 break 5584 } 5585 off2 := v_0.AuxInt 5586 sym2 := v_0.Aux 5587 base := v_0.Args[0] 5588 val := v.Args[1] 5589 mem := v.Args[2] 5590 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5591 break 5592 } 5593 v.reset(OpAMD64MOVBstore) 5594 v.AuxInt = off1 + off2 5595 v.Aux = mergeSym(sym1, sym2) 5596 v.AddArg(base) 5597 v.AddArg(val) 5598 v.AddArg(mem) 5599 return true 5600 } 5601 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) 5602 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 5603 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) 5604 for { 5605 off1 := v.AuxInt 5606 sym1 := v.Aux 5607 _ = v.Args[2] 5608 v_0 := v.Args[0] 5609 if v_0.Op != OpAMD64LEAQ1 { 5610 break 5611 } 5612 off2 := v_0.AuxInt 5613 sym2 := v_0.Aux 5614 _ = v_0.Args[1] 5615 ptr := v_0.Args[0] 5616 idx := v_0.Args[1] 5617 val := v.Args[1] 5618 mem := v.Args[2] 5619 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 5620 break 5621 } 5622 v.reset(OpAMD64MOVBstoreidx1) 5623 v.AuxInt = off1 + off2 5624 v.Aux = mergeSym(sym1, sym2) 5625 v.AddArg(ptr) 5626 v.AddArg(idx) 5627 v.AddArg(val) 5628 v.AddArg(mem) 5629 return true 5630 } 5631 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) 5632 // cond: ptr.Op != OpSB 5633 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) 5634 for { 5635 off := v.AuxInt 5636 sym := v.Aux 5637 _ = v.Args[2] 5638 v_0 := v.Args[0] 5639 if v_0.Op != OpAMD64ADDQ { 5640 break 5641 } 5642 _ = v_0.Args[1] 5643 ptr := v_0.Args[0] 5644 idx := v_0.Args[1] 5645 val := v.Args[1] 5646 mem := v.Args[2] 5647 if !(ptr.Op != OpSB) { 5648 break 5649 } 5650 v.reset(OpAMD64MOVBstoreidx1) 5651 v.AuxInt = off 5652 v.Aux = sym 5653 v.AddArg(ptr) 5654 
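// A plain ADDQ address becomes the 1-byte-scaled indexed form; operands are
// appended in MOVBstoreidx1 order: ptr, idx, val, mem. The ptr.Op != OpSB
// guard presumably exists because the SB pseudo-register cannot serve as the
// base of an indexed address. Presumed rules form:
//
//	(MOVBstore [off] {sym} (ADDQ ptr idx) val mem) && ptr.Op != OpSB
//		-> (MOVBstoreidx1 [off] {sym} ptr idx val mem)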
v.AddArg(idx) 5655 v.AddArg(val) 5656 v.AddArg(mem) 5657 return true 5658 } 5659 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) 5660 // cond: x0.Uses == 1 && clobber(x0) 5661 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem) 5662 for { 5663 i := v.AuxInt 5664 s := v.Aux 5665 _ = v.Args[2] 5666 p := v.Args[0] 5667 w := v.Args[1] 5668 x0 := v.Args[2] 5669 if x0.Op != OpAMD64MOVBstore { 5670 break 5671 } 5672 if x0.AuxInt != i-1 { 5673 break 5674 } 5675 if x0.Aux != s { 5676 break 5677 } 5678 _ = x0.Args[2] 5679 if p != x0.Args[0] { 5680 break 5681 } 5682 x0_1 := x0.Args[1] 5683 if x0_1.Op != OpAMD64SHRWconst { 5684 break 5685 } 5686 if x0_1.AuxInt != 8 { 5687 break 5688 } 5689 if w != x0_1.Args[0] { 5690 break 5691 } 5692 mem := x0.Args[2] 5693 if !(x0.Uses == 1 && clobber(x0)) { 5694 break 5695 } 5696 v.reset(OpAMD64MOVWstore) 5697 v.AuxInt = i - 1 5698 v.Aux = s 5699 v.AddArg(p) 5700 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 5701 v0.AuxInt = 8 5702 v0.AddArg(w) 5703 v.AddArg(v0) 5704 v.AddArg(mem) 5705 return true 5706 } 5707 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) 5708 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 5709 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem) 5710 for { 5711 i := v.AuxInt 5712 s := v.Aux 5713 _ = v.Args[2] 5714 p := v.Args[0] 5715 w := v.Args[1] 5716 x2 := v.Args[2] 5717 if x2.Op != OpAMD64MOVBstore { 5718 break 5719 } 5720 if x2.AuxInt != i-1 { 5721 break 5722 } 5723 if x2.Aux != s { 5724 break 5725 } 5726 _ = x2.Args[2] 5727 if p != x2.Args[0] { 5728 break 5729 } 5730 x2_1 := x2.Args[1] 5731 if x2_1.Op != OpAMD64SHRLconst { 5732 break 5733 } 5734 if x2_1.AuxInt != 8 { 5735 break 5736 } 5737 if w != x2_1.Args[0] { 5738 break 5739 } 5740 x1 := x2.Args[2] 5741 if x1.Op != OpAMD64MOVBstore { 5742 break 5743 } 5744 if x1.AuxInt != i-2 { 5745 break 5746 } 5747 if x1.Aux != s { 5748 break 5749 } 5750 _ = x1.Args[2] 5751 if p != x1.Args[0] { 5752 break 5753 } 5754 x1_1 := x1.Args[1] 5755 if x1_1.Op != OpAMD64SHRLconst { 5756 break 5757 } 5758 if x1_1.AuxInt != 16 { 5759 break 5760 } 5761 if w != x1_1.Args[0] { 5762 break 5763 } 5764 x0 := x1.Args[2] 5765 if x0.Op != OpAMD64MOVBstore { 5766 break 5767 } 5768 if x0.AuxInt != i-3 { 5769 break 5770 } 5771 if x0.Aux != s { 5772 break 5773 } 5774 _ = x0.Args[2] 5775 if p != x0.Args[0] { 5776 break 5777 } 5778 x0_1 := x0.Args[1] 5779 if x0_1.Op != OpAMD64SHRLconst { 5780 break 5781 } 5782 if x0_1.AuxInt != 24 { 5783 break 5784 } 5785 if w != x0_1.Args[0] { 5786 break 5787 } 5788 mem := x0.Args[2] 5789 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 5790 break 5791 } 5792 v.reset(OpAMD64MOVLstore) 5793 v.AuxInt = i - 3 5794 v.Aux = s 5795 v.AddArg(p) 5796 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 5797 v0.AddArg(w) 5798 v.AddArg(v0) 5799 v.AddArg(mem) 5800 return true 5801 } 5802 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) 5803 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 
1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 5804 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem) 5805 for { 5806 i := v.AuxInt 5807 s := v.Aux 5808 _ = v.Args[2] 5809 p := v.Args[0] 5810 w := v.Args[1] 5811 x6 := v.Args[2] 5812 if x6.Op != OpAMD64MOVBstore { 5813 break 5814 } 5815 if x6.AuxInt != i-1 { 5816 break 5817 } 5818 if x6.Aux != s { 5819 break 5820 } 5821 _ = x6.Args[2] 5822 if p != x6.Args[0] { 5823 break 5824 } 5825 x6_1 := x6.Args[1] 5826 if x6_1.Op != OpAMD64SHRQconst { 5827 break 5828 } 5829 if x6_1.AuxInt != 8 { 5830 break 5831 } 5832 if w != x6_1.Args[0] { 5833 break 5834 } 5835 x5 := x6.Args[2] 5836 if x5.Op != OpAMD64MOVBstore { 5837 break 5838 } 5839 if x5.AuxInt != i-2 { 5840 break 5841 } 5842 if x5.Aux != s { 5843 break 5844 } 5845 _ = x5.Args[2] 5846 if p != x5.Args[0] { 5847 break 5848 } 5849 x5_1 := x5.Args[1] 5850 if x5_1.Op != OpAMD64SHRQconst { 5851 break 5852 } 5853 if x5_1.AuxInt != 16 { 5854 break 5855 } 5856 if w != x5_1.Args[0] { 5857 break 5858 } 5859 x4 := x5.Args[2] 5860 if x4.Op != OpAMD64MOVBstore { 5861 break 5862 } 5863 if x4.AuxInt != i-3 { 5864 break 5865 } 5866 if x4.Aux != s { 5867 break 5868 } 5869 _ = x4.Args[2] 5870 if p != x4.Args[0] { 5871 break 5872 } 5873 x4_1 := x4.Args[1] 5874 if x4_1.Op != OpAMD64SHRQconst { 5875 break 5876 } 5877 if x4_1.AuxInt != 24 { 5878 break 5879 } 5880 if w != x4_1.Args[0] { 5881 break 5882 } 5883 x3 := x4.Args[2] 5884 if x3.Op != OpAMD64MOVBstore { 5885 break 5886 } 5887 if x3.AuxInt != i-4 { 5888 break 5889 } 5890 if x3.Aux != s { 5891 break 5892 } 5893 _ = x3.Args[2] 5894 if p != x3.Args[0] { 5895 break 5896 } 5897 x3_1 := x3.Args[1] 5898 if x3_1.Op != OpAMD64SHRQconst { 5899 break 5900 } 5901 if x3_1.AuxInt != 32 { 5902 break 5903 } 5904 if w != x3_1.Args[0] { 5905 break 5906 } 5907 x2 := x3.Args[2] 5908 if x2.Op != OpAMD64MOVBstore { 5909 break 5910 } 5911 if x2.AuxInt != i-5 { 5912 break 5913 } 5914 if x2.Aux != s { 5915 break 5916 } 5917 _ = x2.Args[2] 5918 if p != x2.Args[0] { 5919 break 5920 } 5921 x2_1 := x2.Args[1] 5922 if x2_1.Op != OpAMD64SHRQconst { 5923 break 5924 } 5925 if x2_1.AuxInt != 40 { 5926 break 5927 } 5928 if w != x2_1.Args[0] { 5929 break 5930 } 5931 x1 := x2.Args[2] 5932 if x1.Op != OpAMD64MOVBstore { 5933 break 5934 } 5935 if x1.AuxInt != i-6 { 5936 break 5937 } 5938 if x1.Aux != s { 5939 break 5940 } 5941 _ = x1.Args[2] 5942 if p != x1.Args[0] { 5943 break 5944 } 5945 x1_1 := x1.Args[1] 5946 if x1_1.Op != OpAMD64SHRQconst { 5947 break 5948 } 5949 if x1_1.AuxInt != 48 { 5950 break 5951 } 5952 if w != x1_1.Args[0] { 5953 break 5954 } 5955 x0 := x1.Args[2] 5956 if x0.Op != OpAMD64MOVBstore { 5957 break 5958 } 5959 if x0.AuxInt != i-7 { 5960 break 5961 } 5962 if x0.Aux != s { 5963 break 5964 } 5965 _ = x0.Args[2] 5966 if p != x0.Args[0] { 5967 break 5968 } 5969 x0_1 := x0.Args[1] 5970 if x0_1.Op != OpAMD64SHRQconst { 5971 break 5972 } 5973 if x0_1.AuxInt != 56 { 5974 break 5975 } 5976 if w != x0_1.Args[0] { 5977 break 5978 } 5979 mem := x0.Args[2] 5980 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 5981 break 5982 } 5983 v.reset(OpAMD64MOVQstore) 5984 v.AuxInt = i - 7 5985 v.Aux = s 5986 v.AddArg(p) 5987 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 5988 v0.AddArg(w) 5989 v.AddArg(v0) 5990 v.AddArg(mem) 5991 
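// Eight single-byte stores of w's successive 8-bit right shifts, at
// descending offsets, have just been replaced by one BSWAPQ feeding a single
// quad store. This is the shape a manual big-endian 64-bit store lowers to
// (an illustrative assumption about typical source, e.g. code shaped like
// encoding/binary.BigEndian.PutUint64, not something this file encodes):
//
//	b[0] = byte(v >> 56) // matched at offset i-7, as SHRQconst [56] w
//	b[1] = byte(v >> 48)
//	// ...
//	b[7] = byte(v)       // matched at offset i, as the unshifted w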
return true 5992 } 5993 return false 5994 } 5995 func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { 5996 b := v.Block 5997 _ = b 5998 typ := &b.Func.Config.Types 5999 _ = typ 6000 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) 6001 // cond: x.Uses == 1 && clobber(x) 6002 // result: (MOVWstore [i-1] {s} p w mem) 6003 for { 6004 i := v.AuxInt 6005 s := v.Aux 6006 _ = v.Args[2] 6007 p := v.Args[0] 6008 v_1 := v.Args[1] 6009 if v_1.Op != OpAMD64SHRQconst { 6010 break 6011 } 6012 if v_1.AuxInt != 8 { 6013 break 6014 } 6015 w := v_1.Args[0] 6016 x := v.Args[2] 6017 if x.Op != OpAMD64MOVBstore { 6018 break 6019 } 6020 if x.AuxInt != i-1 { 6021 break 6022 } 6023 if x.Aux != s { 6024 break 6025 } 6026 _ = x.Args[2] 6027 if p != x.Args[0] { 6028 break 6029 } 6030 if w != x.Args[1] { 6031 break 6032 } 6033 mem := x.Args[2] 6034 if !(x.Uses == 1 && clobber(x)) { 6035 break 6036 } 6037 v.reset(OpAMD64MOVWstore) 6038 v.AuxInt = i - 1 6039 v.Aux = s 6040 v.AddArg(p) 6041 v.AddArg(w) 6042 v.AddArg(mem) 6043 return true 6044 } 6045 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) 6046 // cond: x.Uses == 1 && clobber(x) 6047 // result: (MOVWstore [i-1] {s} p w0 mem) 6048 for { 6049 i := v.AuxInt 6050 s := v.Aux 6051 _ = v.Args[2] 6052 p := v.Args[0] 6053 v_1 := v.Args[1] 6054 if v_1.Op != OpAMD64SHRQconst { 6055 break 6056 } 6057 j := v_1.AuxInt 6058 w := v_1.Args[0] 6059 x := v.Args[2] 6060 if x.Op != OpAMD64MOVBstore { 6061 break 6062 } 6063 if x.AuxInt != i-1 { 6064 break 6065 } 6066 if x.Aux != s { 6067 break 6068 } 6069 _ = x.Args[2] 6070 if p != x.Args[0] { 6071 break 6072 } 6073 w0 := x.Args[1] 6074 if w0.Op != OpAMD64SHRQconst { 6075 break 6076 } 6077 if w0.AuxInt != j-8 { 6078 break 6079 } 6080 if w != w0.Args[0] { 6081 break 6082 } 6083 mem := x.Args[2] 6084 if !(x.Uses == 1 && clobber(x)) { 6085 break 6086 } 6087 v.reset(OpAMD64MOVWstore) 6088 v.AuxInt = i - 1 6089 v.Aux = s 6090 v.AddArg(p) 6091 v.AddArg(w0) 6092 v.AddArg(mem) 6093 return true 6094 } 6095 // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) 6096 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) 6097 // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) 6098 for { 6099 i := v.AuxInt 6100 s := v.Aux 6101 _ = v.Args[2] 6102 p := v.Args[0] 6103 x1 := v.Args[1] 6104 if x1.Op != OpAMD64MOVBload { 6105 break 6106 } 6107 j := x1.AuxInt 6108 s2 := x1.Aux 6109 _ = x1.Args[1] 6110 p2 := x1.Args[0] 6111 mem := x1.Args[1] 6112 mem2 := v.Args[2] 6113 if mem2.Op != OpAMD64MOVBstore { 6114 break 6115 } 6116 if mem2.AuxInt != i-1 { 6117 break 6118 } 6119 if mem2.Aux != s { 6120 break 6121 } 6122 _ = mem2.Args[2] 6123 if p != mem2.Args[0] { 6124 break 6125 } 6126 x2 := mem2.Args[1] 6127 if x2.Op != OpAMD64MOVBload { 6128 break 6129 } 6130 if x2.AuxInt != j-1 { 6131 break 6132 } 6133 if x2.Aux != s2 { 6134 break 6135 } 6136 _ = x2.Args[1] 6137 if p2 != x2.Args[0] { 6138 break 6139 } 6140 if mem != x2.Args[1] { 6141 break 6142 } 6143 if mem != mem2.Args[2] { 6144 break 6145 } 6146 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { 6147 break 6148 } 6149 v.reset(OpAMD64MOVWstore) 6150 v.AuxInt = i - 1 6151 v.Aux = s 6152 v.AddArg(p) 6153 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 6154 v0.AuxInt = j - 1 6155 v0.Aux = s2 6156 v0.AddArg(p2) 6157 
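// Two adjacent byte loads feeding two adjacent byte stores (a byte-wise copy)
// collapse into a single 16-bit copy: the fresh MOVWload is built here with
// type typ.UInt16 and then stored by a MOVWstore at the lower offset.
// Presumed rules form, reconstructed from the comments above:
//
//	(MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem)
//	    mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
//		&& x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1
//		&& clobber(x1) && clobber(x2) && clobber(mem2)
//		-> (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)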
v0.AddArg(mem) 6158 v.AddArg(v0) 6159 v.AddArg(mem) 6160 return true 6161 } 6162 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) 6163 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 6164 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 6165 for { 6166 off1 := v.AuxInt 6167 sym1 := v.Aux 6168 _ = v.Args[2] 6169 v_0 := v.Args[0] 6170 if v_0.Op != OpAMD64LEAL { 6171 break 6172 } 6173 off2 := v_0.AuxInt 6174 sym2 := v_0.Aux 6175 base := v_0.Args[0] 6176 val := v.Args[1] 6177 mem := v.Args[2] 6178 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 6179 break 6180 } 6181 v.reset(OpAMD64MOVBstore) 6182 v.AuxInt = off1 + off2 6183 v.Aux = mergeSym(sym1, sym2) 6184 v.AddArg(base) 6185 v.AddArg(val) 6186 v.AddArg(mem) 6187 return true 6188 } 6189 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) 6190 // cond: is32Bit(off1+off2) 6191 // result: (MOVBstore [off1+off2] {sym} ptr val mem) 6192 for { 6193 off1 := v.AuxInt 6194 sym := v.Aux 6195 _ = v.Args[2] 6196 v_0 := v.Args[0] 6197 if v_0.Op != OpAMD64ADDLconst { 6198 break 6199 } 6200 off2 := v_0.AuxInt 6201 ptr := v_0.Args[0] 6202 val := v.Args[1] 6203 mem := v.Args[2] 6204 if !(is32Bit(off1 + off2)) { 6205 break 6206 } 6207 v.reset(OpAMD64MOVBstore) 6208 v.AuxInt = off1 + off2 6209 v.Aux = sym 6210 v.AddArg(ptr) 6211 v.AddArg(val) 6212 v.AddArg(mem) 6213 return true 6214 } 6215 return false 6216 } 6217 func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { 6218 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) 6219 // cond: ValAndOff(sc).canAdd(off) 6220 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 6221 for { 6222 sc := v.AuxInt 6223 s := v.Aux 6224 _ = v.Args[1] 6225 v_0 := v.Args[0] 6226 if v_0.Op != OpAMD64ADDQconst { 6227 break 6228 } 6229 off := v_0.AuxInt 6230 ptr := v_0.Args[0] 6231 mem := v.Args[1] 6232 if !(ValAndOff(sc).canAdd(off)) { 6233 break 6234 } 6235 v.reset(OpAMD64MOVBstoreconst) 6236 v.AuxInt = ValAndOff(sc).add(off) 6237 v.Aux = s 6238 v.AddArg(ptr) 6239 v.AddArg(mem) 6240 return true 6241 } 6242 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) 6243 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6244 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6245 for { 6246 sc := v.AuxInt 6247 sym1 := v.Aux 6248 _ = v.Args[1] 6249 v_0 := v.Args[0] 6250 if v_0.Op != OpAMD64LEAQ { 6251 break 6252 } 6253 off := v_0.AuxInt 6254 sym2 := v_0.Aux 6255 ptr := v_0.Args[0] 6256 mem := v.Args[1] 6257 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6258 break 6259 } 6260 v.reset(OpAMD64MOVBstoreconst) 6261 v.AuxInt = ValAndOff(sc).add(off) 6262 v.Aux = mergeSym(sym1, sym2) 6263 v.AddArg(ptr) 6264 v.AddArg(mem) 6265 return true 6266 } 6267 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) 6268 // cond: canMergeSym(sym1, sym2) 6269 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) 6270 for { 6271 x := v.AuxInt 6272 sym1 := v.Aux 6273 _ = v.Args[1] 6274 v_0 := v.Args[0] 6275 if v_0.Op != OpAMD64LEAQ1 { 6276 break 6277 } 6278 off := v_0.AuxInt 6279 sym2 := v_0.Aux 6280 _ = v_0.Args[1] 6281 ptr := v_0.Args[0] 6282 idx := v_0.Args[1] 6283 mem := v.Args[1] 6284 if !(canMergeSym(sym1, sym2)) { 6285 break 6286 } 6287 v.reset(OpAMD64MOVBstoreconstidx1) 6288 v.AuxInt = ValAndOff(x).add(off) 6289 v.Aux = mergeSym(sym1, sym2) 6290 v.AddArg(ptr) 6291 v.AddArg(idx) 6292 v.AddArg(mem) 6293 return true 6294 } 6295 // 
match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem) 6296 // cond: 6297 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) 6298 for { 6299 x := v.AuxInt 6300 sym := v.Aux 6301 _ = v.Args[1] 6302 v_0 := v.Args[0] 6303 if v_0.Op != OpAMD64ADDQ { 6304 break 6305 } 6306 _ = v_0.Args[1] 6307 ptr := v_0.Args[0] 6308 idx := v_0.Args[1] 6309 mem := v.Args[1] 6310 v.reset(OpAMD64MOVBstoreconstidx1) 6311 v.AuxInt = x 6312 v.Aux = sym 6313 v.AddArg(ptr) 6314 v.AddArg(idx) 6315 v.AddArg(mem) 6316 return true 6317 } 6318 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) 6319 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 6320 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) 6321 for { 6322 c := v.AuxInt 6323 s := v.Aux 6324 _ = v.Args[1] 6325 p := v.Args[0] 6326 x := v.Args[1] 6327 if x.Op != OpAMD64MOVBstoreconst { 6328 break 6329 } 6330 a := x.AuxInt 6331 if x.Aux != s { 6332 break 6333 } 6334 _ = x.Args[1] 6335 if p != x.Args[0] { 6336 break 6337 } 6338 mem := x.Args[1] 6339 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 6340 break 6341 } 6342 v.reset(OpAMD64MOVWstoreconst) 6343 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 6344 v.Aux = s 6345 v.AddArg(p) 6346 v.AddArg(mem) 6347 return true 6348 } 6349 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) 6350 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) 6351 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) 6352 for { 6353 sc := v.AuxInt 6354 sym1 := v.Aux 6355 _ = v.Args[1] 6356 v_0 := v.Args[0] 6357 if v_0.Op != OpAMD64LEAL { 6358 break 6359 } 6360 off := v_0.AuxInt 6361 sym2 := v_0.Aux 6362 ptr := v_0.Args[0] 6363 mem := v.Args[1] 6364 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { 6365 break 6366 } 6367 v.reset(OpAMD64MOVBstoreconst) 6368 v.AuxInt = ValAndOff(sc).add(off) 6369 v.Aux = mergeSym(sym1, sym2) 6370 v.AddArg(ptr) 6371 v.AddArg(mem) 6372 return true 6373 } 6374 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) 6375 // cond: ValAndOff(sc).canAdd(off) 6376 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) 6377 for { 6378 sc := v.AuxInt 6379 s := v.Aux 6380 _ = v.Args[1] 6381 v_0 := v.Args[0] 6382 if v_0.Op != OpAMD64ADDLconst { 6383 break 6384 } 6385 off := v_0.AuxInt 6386 ptr := v_0.Args[0] 6387 mem := v.Args[1] 6388 if !(ValAndOff(sc).canAdd(off)) { 6389 break 6390 } 6391 v.reset(OpAMD64MOVBstoreconst) 6392 v.AuxInt = ValAndOff(sc).add(off) 6393 v.Aux = s 6394 v.AddArg(ptr) 6395 v.AddArg(mem) 6396 return true 6397 } 6398 return false 6399 } 6400 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { 6401 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) 6402 // cond: ValAndOff(x).canAdd(c) 6403 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6404 for { 6405 x := v.AuxInt 6406 sym := v.Aux 6407 _ = v.Args[2] 6408 v_0 := v.Args[0] 6409 if v_0.Op != OpAMD64ADDQconst { 6410 break 6411 } 6412 c := v_0.AuxInt 6413 ptr := v_0.Args[0] 6414 idx := v.Args[1] 6415 mem := v.Args[2] 6416 if !(ValAndOff(x).canAdd(c)) { 6417 break 6418 } 6419 v.reset(OpAMD64MOVBstoreconstidx1) 6420 v.AuxInt = ValAndOff(x).add(c) 6421 v.Aux = sym 6422 v.AddArg(ptr) 6423 v.AddArg(idx) 6424 v.AddArg(mem) 6425 return true 6426 } 6427 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) 
6428 // cond: ValAndOff(x).canAdd(c) 6429 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) 6430 for { 6431 x := v.AuxInt 6432 sym := v.Aux 6433 _ = v.Args[2] 6434 ptr := v.Args[0] 6435 v_1 := v.Args[1] 6436 if v_1.Op != OpAMD64ADDQconst { 6437 break 6438 } 6439 c := v_1.AuxInt 6440 idx := v_1.Args[0] 6441 mem := v.Args[2] 6442 if !(ValAndOff(x).canAdd(c)) { 6443 break 6444 } 6445 v.reset(OpAMD64MOVBstoreconstidx1) 6446 v.AuxInt = ValAndOff(x).add(c) 6447 v.Aux = sym 6448 v.AddArg(ptr) 6449 v.AddArg(idx) 6450 v.AddArg(mem) 6451 return true 6452 } 6453 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) 6454 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) 6455 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) 6456 for { 6457 c := v.AuxInt 6458 s := v.Aux 6459 _ = v.Args[2] 6460 p := v.Args[0] 6461 i := v.Args[1] 6462 x := v.Args[2] 6463 if x.Op != OpAMD64MOVBstoreconstidx1 { 6464 break 6465 } 6466 a := x.AuxInt 6467 if x.Aux != s { 6468 break 6469 } 6470 _ = x.Args[2] 6471 if p != x.Args[0] { 6472 break 6473 } 6474 if i != x.Args[1] { 6475 break 6476 } 6477 mem := x.Args[2] 6478 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { 6479 break 6480 } 6481 v.reset(OpAMD64MOVWstoreconstidx1) 6482 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) 6483 v.Aux = s 6484 v.AddArg(p) 6485 v.AddArg(i) 6486 v.AddArg(mem) 6487 return true 6488 } 6489 return false 6490 } 6491 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { 6492 b := v.Block 6493 _ = b 6494 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) 6495 // cond: is32Bit(c+d) 6496 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 6497 for { 6498 c := v.AuxInt 6499 sym := v.Aux 6500 _ = v.Args[3] 6501 v_0 := v.Args[0] 6502 if v_0.Op != OpAMD64ADDQconst { 6503 break 6504 } 6505 d := v_0.AuxInt 6506 ptr := v_0.Args[0] 6507 idx := v.Args[1] 6508 val := v.Args[2] 6509 mem := v.Args[3] 6510 if !(is32Bit(c + d)) { 6511 break 6512 } 6513 v.reset(OpAMD64MOVBstoreidx1) 6514 v.AuxInt = c + d 6515 v.Aux = sym 6516 v.AddArg(ptr) 6517 v.AddArg(idx) 6518 v.AddArg(val) 6519 v.AddArg(mem) 6520 return true 6521 } 6522 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) 6523 // cond: is32Bit(c+d) 6524 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) 6525 for { 6526 c := v.AuxInt 6527 sym := v.Aux 6528 _ = v.Args[3] 6529 ptr := v.Args[0] 6530 v_1 := v.Args[1] 6531 if v_1.Op != OpAMD64ADDQconst { 6532 break 6533 } 6534 d := v_1.AuxInt 6535 idx := v_1.Args[0] 6536 val := v.Args[2] 6537 mem := v.Args[3] 6538 if !(is32Bit(c + d)) { 6539 break 6540 } 6541 v.reset(OpAMD64MOVBstoreidx1) 6542 v.AuxInt = c + d 6543 v.Aux = sym 6544 v.AddArg(ptr) 6545 v.AddArg(idx) 6546 v.AddArg(val) 6547 v.AddArg(mem) 6548 return true 6549 } 6550 // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) 6551 // cond: x0.Uses == 1 && clobber(x0) 6552 // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem) 6553 for { 6554 i := v.AuxInt 6555 s := v.Aux 6556 _ = v.Args[3] 6557 p := v.Args[0] 6558 idx := v.Args[1] 6559 w := v.Args[2] 6560 x0 := v.Args[3] 6561 if x0.Op != OpAMD64MOVBstoreidx1 { 6562 break 6563 } 6564 if x0.AuxInt != i-1 { 6565 break 6566 } 6567 if x0.Aux != s { 6568 break 6569 } 6570 _ = x0.Args[3] 6571 if p != x0.Args[0] { 6572 break 
6573 } 6574 if idx != x0.Args[1] { 6575 break 6576 } 6577 x0_2 := x0.Args[2] 6578 if x0_2.Op != OpAMD64SHRWconst { 6579 break 6580 } 6581 if x0_2.AuxInt != 8 { 6582 break 6583 } 6584 if w != x0_2.Args[0] { 6585 break 6586 } 6587 mem := x0.Args[3] 6588 if !(x0.Uses == 1 && clobber(x0)) { 6589 break 6590 } 6591 v.reset(OpAMD64MOVWstoreidx1) 6592 v.AuxInt = i - 1 6593 v.Aux = s 6594 v.AddArg(p) 6595 v.AddArg(idx) 6596 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) 6597 v0.AuxInt = 8 6598 v0.AddArg(w) 6599 v.AddArg(v0) 6600 v.AddArg(mem) 6601 return true 6602 } 6603 // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) 6604 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 6605 // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem) 6606 for { 6607 i := v.AuxInt 6608 s := v.Aux 6609 _ = v.Args[3] 6610 p := v.Args[0] 6611 idx := v.Args[1] 6612 w := v.Args[2] 6613 x2 := v.Args[3] 6614 if x2.Op != OpAMD64MOVBstoreidx1 { 6615 break 6616 } 6617 if x2.AuxInt != i-1 { 6618 break 6619 } 6620 if x2.Aux != s { 6621 break 6622 } 6623 _ = x2.Args[3] 6624 if p != x2.Args[0] { 6625 break 6626 } 6627 if idx != x2.Args[1] { 6628 break 6629 } 6630 x2_2 := x2.Args[2] 6631 if x2_2.Op != OpAMD64SHRLconst { 6632 break 6633 } 6634 if x2_2.AuxInt != 8 { 6635 break 6636 } 6637 if w != x2_2.Args[0] { 6638 break 6639 } 6640 x1 := x2.Args[3] 6641 if x1.Op != OpAMD64MOVBstoreidx1 { 6642 break 6643 } 6644 if x1.AuxInt != i-2 { 6645 break 6646 } 6647 if x1.Aux != s { 6648 break 6649 } 6650 _ = x1.Args[3] 6651 if p != x1.Args[0] { 6652 break 6653 } 6654 if idx != x1.Args[1] { 6655 break 6656 } 6657 x1_2 := x1.Args[2] 6658 if x1_2.Op != OpAMD64SHRLconst { 6659 break 6660 } 6661 if x1_2.AuxInt != 16 { 6662 break 6663 } 6664 if w != x1_2.Args[0] { 6665 break 6666 } 6667 x0 := x1.Args[3] 6668 if x0.Op != OpAMD64MOVBstoreidx1 { 6669 break 6670 } 6671 if x0.AuxInt != i-3 { 6672 break 6673 } 6674 if x0.Aux != s { 6675 break 6676 } 6677 _ = x0.Args[3] 6678 if p != x0.Args[0] { 6679 break 6680 } 6681 if idx != x0.Args[1] { 6682 break 6683 } 6684 x0_2 := x0.Args[2] 6685 if x0_2.Op != OpAMD64SHRLconst { 6686 break 6687 } 6688 if x0_2.AuxInt != 24 { 6689 break 6690 } 6691 if w != x0_2.Args[0] { 6692 break 6693 } 6694 mem := x0.Args[3] 6695 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { 6696 break 6697 } 6698 v.reset(OpAMD64MOVLstoreidx1) 6699 v.AuxInt = i - 3 6700 v.Aux = s 6701 v.AddArg(p) 6702 v.AddArg(idx) 6703 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) 6704 v0.AddArg(w) 6705 v.AddArg(v0) 6706 v.AddArg(mem) 6707 return true 6708 } 6709 // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) 6710 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) 6711 // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem) 6712 for { 6713 i := 
v.AuxInt 6714 s := v.Aux 6715 _ = v.Args[3] 6716 p := v.Args[0] 6717 idx := v.Args[1] 6718 w := v.Args[2] 6719 x6 := v.Args[3] 6720 if x6.Op != OpAMD64MOVBstoreidx1 { 6721 break 6722 } 6723 if x6.AuxInt != i-1 { 6724 break 6725 } 6726 if x6.Aux != s { 6727 break 6728 } 6729 _ = x6.Args[3] 6730 if p != x6.Args[0] { 6731 break 6732 } 6733 if idx != x6.Args[1] { 6734 break 6735 } 6736 x6_2 := x6.Args[2] 6737 if x6_2.Op != OpAMD64SHRQconst { 6738 break 6739 } 6740 if x6_2.AuxInt != 8 { 6741 break 6742 } 6743 if w != x6_2.Args[0] { 6744 break 6745 } 6746 x5 := x6.Args[3] 6747 if x5.Op != OpAMD64MOVBstoreidx1 { 6748 break 6749 } 6750 if x5.AuxInt != i-2 { 6751 break 6752 } 6753 if x5.Aux != s { 6754 break 6755 } 6756 _ = x5.Args[3] 6757 if p != x5.Args[0] { 6758 break 6759 } 6760 if idx != x5.Args[1] { 6761 break 6762 } 6763 x5_2 := x5.Args[2] 6764 if x5_2.Op != OpAMD64SHRQconst { 6765 break 6766 } 6767 if x5_2.AuxInt != 16 { 6768 break 6769 } 6770 if w != x5_2.Args[0] { 6771 break 6772 } 6773 x4 := x5.Args[3] 6774 if x4.Op != OpAMD64MOVBstoreidx1 { 6775 break 6776 } 6777 if x4.AuxInt != i-3 { 6778 break 6779 } 6780 if x4.Aux != s { 6781 break 6782 } 6783 _ = x4.Args[3] 6784 if p != x4.Args[0] { 6785 break 6786 } 6787 if idx != x4.Args[1] { 6788 break 6789 } 6790 x4_2 := x4.Args[2] 6791 if x4_2.Op != OpAMD64SHRQconst { 6792 break 6793 } 6794 if x4_2.AuxInt != 24 { 6795 break 6796 } 6797 if w != x4_2.Args[0] { 6798 break 6799 } 6800 x3 := x4.Args[3] 6801 if x3.Op != OpAMD64MOVBstoreidx1 { 6802 break 6803 } 6804 if x3.AuxInt != i-4 { 6805 break 6806 } 6807 if x3.Aux != s { 6808 break 6809 } 6810 _ = x3.Args[3] 6811 if p != x3.Args[0] { 6812 break 6813 } 6814 if idx != x3.Args[1] { 6815 break 6816 } 6817 x3_2 := x3.Args[2] 6818 if x3_2.Op != OpAMD64SHRQconst { 6819 break 6820 } 6821 if x3_2.AuxInt != 32 { 6822 break 6823 } 6824 if w != x3_2.Args[0] { 6825 break 6826 } 6827 x2 := x3.Args[3] 6828 if x2.Op != OpAMD64MOVBstoreidx1 { 6829 break 6830 } 6831 if x2.AuxInt != i-5 { 6832 break 6833 } 6834 if x2.Aux != s { 6835 break 6836 } 6837 _ = x2.Args[3] 6838 if p != x2.Args[0] { 6839 break 6840 } 6841 if idx != x2.Args[1] { 6842 break 6843 } 6844 x2_2 := x2.Args[2] 6845 if x2_2.Op != OpAMD64SHRQconst { 6846 break 6847 } 6848 if x2_2.AuxInt != 40 { 6849 break 6850 } 6851 if w != x2_2.Args[0] { 6852 break 6853 } 6854 x1 := x2.Args[3] 6855 if x1.Op != OpAMD64MOVBstoreidx1 { 6856 break 6857 } 6858 if x1.AuxInt != i-6 { 6859 break 6860 } 6861 if x1.Aux != s { 6862 break 6863 } 6864 _ = x1.Args[3] 6865 if p != x1.Args[0] { 6866 break 6867 } 6868 if idx != x1.Args[1] { 6869 break 6870 } 6871 x1_2 := x1.Args[2] 6872 if x1_2.Op != OpAMD64SHRQconst { 6873 break 6874 } 6875 if x1_2.AuxInt != 48 { 6876 break 6877 } 6878 if w != x1_2.Args[0] { 6879 break 6880 } 6881 x0 := x1.Args[3] 6882 if x0.Op != OpAMD64MOVBstoreidx1 { 6883 break 6884 } 6885 if x0.AuxInt != i-7 { 6886 break 6887 } 6888 if x0.Aux != s { 6889 break 6890 } 6891 _ = x0.Args[3] 6892 if p != x0.Args[0] { 6893 break 6894 } 6895 if idx != x0.Args[1] { 6896 break 6897 } 6898 x0_2 := x0.Args[2] 6899 if x0_2.Op != OpAMD64SHRQconst { 6900 break 6901 } 6902 if x0_2.AuxInt != 56 { 6903 break 6904 } 6905 if w != x0_2.Args[0] { 6906 break 6907 } 6908 mem := x0.Args[3] 6909 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { 6910 break 6911 } 6912 v.reset(OpAMD64MOVQstoreidx1) 6913 v.AuxInt = i 
- 7 6914 v.Aux = s 6915 v.AddArg(p) 6916 v.AddArg(idx) 6917 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) 6918 v0.AddArg(w) 6919 v.AddArg(v0) 6920 v.AddArg(mem) 6921 return true 6922 } 6923 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) 6924 // cond: x.Uses == 1 && clobber(x) 6925 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) 6926 for { 6927 i := v.AuxInt 6928 s := v.Aux 6929 _ = v.Args[3] 6930 p := v.Args[0] 6931 idx := v.Args[1] 6932 v_2 := v.Args[2] 6933 if v_2.Op != OpAMD64SHRQconst { 6934 break 6935 } 6936 if v_2.AuxInt != 8 { 6937 break 6938 } 6939 w := v_2.Args[0] 6940 x := v.Args[3] 6941 if x.Op != OpAMD64MOVBstoreidx1 { 6942 break 6943 } 6944 if x.AuxInt != i-1 { 6945 break 6946 } 6947 if x.Aux != s { 6948 break 6949 } 6950 _ = x.Args[3] 6951 if p != x.Args[0] { 6952 break 6953 } 6954 if idx != x.Args[1] { 6955 break 6956 } 6957 if w != x.Args[2] { 6958 break 6959 } 6960 mem := x.Args[3] 6961 if !(x.Uses == 1 && clobber(x)) { 6962 break 6963 } 6964 v.reset(OpAMD64MOVWstoreidx1) 6965 v.AuxInt = i - 1 6966 v.Aux = s 6967 v.AddArg(p) 6968 v.AddArg(idx) 6969 v.AddArg(w) 6970 v.AddArg(mem) 6971 return true 6972 } 6973 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) 6974 // cond: x.Uses == 1 && clobber(x) 6975 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) 6976 for { 6977 i := v.AuxInt 6978 s := v.Aux 6979 _ = v.Args[3] 6980 p := v.Args[0] 6981 idx := v.Args[1] 6982 v_2 := v.Args[2] 6983 if v_2.Op != OpAMD64SHRQconst { 6984 break 6985 } 6986 j := v_2.AuxInt 6987 w := v_2.Args[0] 6988 x := v.Args[3] 6989 if x.Op != OpAMD64MOVBstoreidx1 { 6990 break 6991 } 6992 if x.AuxInt != i-1 { 6993 break 6994 } 6995 if x.Aux != s { 6996 break 6997 } 6998 _ = x.Args[3] 6999 if p != x.Args[0] { 7000 break 7001 } 7002 if idx != x.Args[1] { 7003 break 7004 } 7005 w0 := x.Args[2] 7006 if w0.Op != OpAMD64SHRQconst { 7007 break 7008 } 7009 if w0.AuxInt != j-8 { 7010 break 7011 } 7012 if w != w0.Args[0] { 7013 break 7014 } 7015 mem := x.Args[3] 7016 if !(x.Uses == 1 && clobber(x)) { 7017 break 7018 } 7019 v.reset(OpAMD64MOVWstoreidx1) 7020 v.AuxInt = i - 1 7021 v.Aux = s 7022 v.AddArg(p) 7023 v.AddArg(idx) 7024 v.AddArg(w0) 7025 v.AddArg(mem) 7026 return true 7027 } 7028 return false 7029 } 7030 func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { 7031 b := v.Block 7032 _ = b 7033 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) 7034 // cond: x.Uses == 1 && clobber(x) 7035 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 7036 for { 7037 x := v.Args[0] 7038 if x.Op != OpAMD64MOVLload { 7039 break 7040 } 7041 off := x.AuxInt 7042 sym := x.Aux 7043 _ = x.Args[1] 7044 ptr := x.Args[0] 7045 mem := x.Args[1] 7046 if !(x.Uses == 1 && clobber(x)) { 7047 break 7048 } 7049 b = x.Block 7050 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 7051 v.reset(OpCopy) 7052 v.AddArg(v0) 7053 v0.AuxInt = off 7054 v0.Aux = sym 7055 v0.AddArg(ptr) 7056 v0.AddArg(mem) 7057 return true 7058 } 7059 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) 7060 // cond: x.Uses == 1 && clobber(x) 7061 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) 7062 for { 7063 x := v.Args[0] 7064 if x.Op != OpAMD64MOVQload { 7065 break 7066 } 7067 off := x.AuxInt 7068 sym := x.Aux 7069 _ = x.Args[1] 7070 ptr := x.Args[0] 7071 mem := x.Args[1] 7072 if !(x.Uses == 1 && clobber(x)) { 7073 break 7074 } 7075 b = x.Block 7076 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) 7077 
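// Rules whose result is prefixed with @x.Block build the replacement value in
// the block of the matched load (b was re-pointed at x.Block above) rather
// than in v's own block, then turn v itself into an OpCopy forwarding to the
// new value. This keeps the memory-reading op beside its mem argument even
// when the extension lives in a different block. Presumed rules form:
//
//	(MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x)
//		-> @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)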
v.reset(OpCopy) 7078 v.AddArg(v0) 7079 v0.AuxInt = off 7080 v0.Aux = sym 7081 v0.AddArg(ptr) 7082 v0.AddArg(mem) 7083 return true 7084 } 7085 // match: (MOVLQSX (ANDLconst [c] x)) 7086 // cond: c & 0x80000000 == 0 7087 // result: (ANDLconst [c & 0x7fffffff] x) 7088 for { 7089 v_0 := v.Args[0] 7090 if v_0.Op != OpAMD64ANDLconst { 7091 break 7092 } 7093 c := v_0.AuxInt 7094 x := v_0.Args[0] 7095 if !(c&0x80000000 == 0) { 7096 break 7097 } 7098 v.reset(OpAMD64ANDLconst) 7099 v.AuxInt = c & 0x7fffffff 7100 v.AddArg(x) 7101 return true 7102 } 7103 // match: (MOVLQSX (MOVLQSX x)) 7104 // cond: 7105 // result: (MOVLQSX x) 7106 for { 7107 v_0 := v.Args[0] 7108 if v_0.Op != OpAMD64MOVLQSX { 7109 break 7110 } 7111 x := v_0.Args[0] 7112 v.reset(OpAMD64MOVLQSX) 7113 v.AddArg(x) 7114 return true 7115 } 7116 // match: (MOVLQSX (MOVWQSX x)) 7117 // cond: 7118 // result: (MOVWQSX x) 7119 for { 7120 v_0 := v.Args[0] 7121 if v_0.Op != OpAMD64MOVWQSX { 7122 break 7123 } 7124 x := v_0.Args[0] 7125 v.reset(OpAMD64MOVWQSX) 7126 v.AddArg(x) 7127 return true 7128 } 7129 // match: (MOVLQSX (MOVBQSX x)) 7130 // cond: 7131 // result: (MOVBQSX x) 7132 for { 7133 v_0 := v.Args[0] 7134 if v_0.Op != OpAMD64MOVBQSX { 7135 break 7136 } 7137 x := v_0.Args[0] 7138 v.reset(OpAMD64MOVBQSX) 7139 v.AddArg(x) 7140 return true 7141 } 7142 return false 7143 } 7144 func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool { 7145 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 7146 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 7147 // result: (MOVLQSX x) 7148 for { 7149 off := v.AuxInt 7150 sym := v.Aux 7151 _ = v.Args[1] 7152 ptr := v.Args[0] 7153 v_1 := v.Args[1] 7154 if v_1.Op != OpAMD64MOVLstore { 7155 break 7156 } 7157 off2 := v_1.AuxInt 7158 sym2 := v_1.Aux 7159 _ = v_1.Args[2] 7160 ptr2 := v_1.Args[0] 7161 x := v_1.Args[1] 7162 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 7163 break 7164 } 7165 v.reset(OpAMD64MOVLQSX) 7166 v.AddArg(x) 7167 return true 7168 } 7169 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 7170 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7171 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7172 for { 7173 off1 := v.AuxInt 7174 sym1 := v.Aux 7175 _ = v.Args[1] 7176 v_0 := v.Args[0] 7177 if v_0.Op != OpAMD64LEAQ { 7178 break 7179 } 7180 off2 := v_0.AuxInt 7181 sym2 := v_0.Aux 7182 base := v_0.Args[0] 7183 mem := v.Args[1] 7184 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7185 break 7186 } 7187 v.reset(OpAMD64MOVLQSXload) 7188 v.AuxInt = off1 + off2 7189 v.Aux = mergeSym(sym1, sym2) 7190 v.AddArg(base) 7191 v.AddArg(mem) 7192 return true 7193 } 7194 return false 7195 } 7196 func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { 7197 b := v.Block 7198 _ = b 7199 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) 7200 // cond: x.Uses == 1 && clobber(x) 7201 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem) 7202 for { 7203 x := v.Args[0] 7204 if x.Op != OpAMD64MOVLload { 7205 break 7206 } 7207 off := x.AuxInt 7208 sym := x.Aux 7209 _ = x.Args[1] 7210 ptr := x.Args[0] 7211 mem := x.Args[1] 7212 if !(x.Uses == 1 && clobber(x)) { 7213 break 7214 } 7215 b = x.Block 7216 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 7217 v.reset(OpCopy) 7218 v.AddArg(v0) 7219 v0.AuxInt = off 7220 v0.Aux = sym 7221 v0.AddArg(ptr) 7222 v0.AddArg(mem) 7223 return true 7224 } 7225 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) 7226 // cond: x.Uses == 1 && clobber(x) 7227 // result: @x.Block 
(MOVLload <v.Type> [off] {sym} ptr mem) 7228 for { 7229 x := v.Args[0] 7230 if x.Op != OpAMD64MOVQload { 7231 break 7232 } 7233 off := x.AuxInt 7234 sym := x.Aux 7235 _ = x.Args[1] 7236 ptr := x.Args[0] 7237 mem := x.Args[1] 7238 if !(x.Uses == 1 && clobber(x)) { 7239 break 7240 } 7241 b = x.Block 7242 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) 7243 v.reset(OpCopy) 7244 v.AddArg(v0) 7245 v0.AuxInt = off 7246 v0.Aux = sym 7247 v0.AddArg(ptr) 7248 v0.AddArg(mem) 7249 return true 7250 } 7251 // match: (MOVLQZX x) 7252 // cond: zeroUpper32Bits(x,3) 7253 // result: x 7254 for { 7255 x := v.Args[0] 7256 if !(zeroUpper32Bits(x, 3)) { 7257 break 7258 } 7259 v.reset(OpCopy) 7260 v.Type = x.Type 7261 v.AddArg(x) 7262 return true 7263 } 7264 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) 7265 // cond: x.Uses == 1 && clobber(x) 7266 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem) 7267 for { 7268 x := v.Args[0] 7269 if x.Op != OpAMD64MOVLloadidx1 { 7270 break 7271 } 7272 off := x.AuxInt 7273 sym := x.Aux 7274 _ = x.Args[2] 7275 ptr := x.Args[0] 7276 idx := x.Args[1] 7277 mem := x.Args[2] 7278 if !(x.Uses == 1 && clobber(x)) { 7279 break 7280 } 7281 b = x.Block 7282 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) 7283 v.reset(OpCopy) 7284 v.AddArg(v0) 7285 v0.AuxInt = off 7286 v0.Aux = sym 7287 v0.AddArg(ptr) 7288 v0.AddArg(idx) 7289 v0.AddArg(mem) 7290 return true 7291 } 7292 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) 7293 // cond: x.Uses == 1 && clobber(x) 7294 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem) 7295 for { 7296 x := v.Args[0] 7297 if x.Op != OpAMD64MOVLloadidx4 { 7298 break 7299 } 7300 off := x.AuxInt 7301 sym := x.Aux 7302 _ = x.Args[2] 7303 ptr := x.Args[0] 7304 idx := x.Args[1] 7305 mem := x.Args[2] 7306 if !(x.Uses == 1 && clobber(x)) { 7307 break 7308 } 7309 b = x.Block 7310 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) 7311 v.reset(OpCopy) 7312 v.AddArg(v0) 7313 v0.AuxInt = off 7314 v0.Aux = sym 7315 v0.AddArg(ptr) 7316 v0.AddArg(idx) 7317 v0.AddArg(mem) 7318 return true 7319 } 7320 // match: (MOVLQZX (ANDLconst [c] x)) 7321 // cond: 7322 // result: (ANDLconst [c] x) 7323 for { 7324 v_0 := v.Args[0] 7325 if v_0.Op != OpAMD64ANDLconst { 7326 break 7327 } 7328 c := v_0.AuxInt 7329 x := v_0.Args[0] 7330 v.reset(OpAMD64ANDLconst) 7331 v.AuxInt = c 7332 v.AddArg(x) 7333 return true 7334 } 7335 // match: (MOVLQZX (MOVLQZX x)) 7336 // cond: 7337 // result: (MOVLQZX x) 7338 for { 7339 v_0 := v.Args[0] 7340 if v_0.Op != OpAMD64MOVLQZX { 7341 break 7342 } 7343 x := v_0.Args[0] 7344 v.reset(OpAMD64MOVLQZX) 7345 v.AddArg(x) 7346 return true 7347 } 7348 // match: (MOVLQZX (MOVWQZX x)) 7349 // cond: 7350 // result: (MOVWQZX x) 7351 for { 7352 v_0 := v.Args[0] 7353 if v_0.Op != OpAMD64MOVWQZX { 7354 break 7355 } 7356 x := v_0.Args[0] 7357 v.reset(OpAMD64MOVWQZX) 7358 v.AddArg(x) 7359 return true 7360 } 7361 // match: (MOVLQZX (MOVBQZX x)) 7362 // cond: 7363 // result: (MOVBQZX x) 7364 for { 7365 v_0 := v.Args[0] 7366 if v_0.Op != OpAMD64MOVBQZX { 7367 break 7368 } 7369 x := v_0.Args[0] 7370 v.reset(OpAMD64MOVBQZX) 7371 v.AddArg(x) 7372 return true 7373 } 7374 return false 7375 } 7376 func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool { 7377 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) 7378 // cond: is32Bit(off1+off2) 7379 // result: (MOVLatomicload [off1+off2] {sym} ptr mem) 7380 for { 7381 off1 := v.AuxInt 7382 sym := v.Aux 7383 _ = v.Args[1] 7384 v_0 := v.Args[0] 7385 if 
v_0.Op != OpAMD64ADDQconst { 7386 break 7387 } 7388 off2 := v_0.AuxInt 7389 ptr := v_0.Args[0] 7390 mem := v.Args[1] 7391 if !(is32Bit(off1 + off2)) { 7392 break 7393 } 7394 v.reset(OpAMD64MOVLatomicload) 7395 v.AuxInt = off1 + off2 7396 v.Aux = sym 7397 v.AddArg(ptr) 7398 v.AddArg(mem) 7399 return true 7400 } 7401 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) 7402 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7403 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) 7404 for { 7405 off1 := v.AuxInt 7406 sym1 := v.Aux 7407 _ = v.Args[1] 7408 v_0 := v.Args[0] 7409 if v_0.Op != OpAMD64LEAQ { 7410 break 7411 } 7412 off2 := v_0.AuxInt 7413 sym2 := v_0.Aux 7414 ptr := v_0.Args[0] 7415 mem := v.Args[1] 7416 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7417 break 7418 } 7419 v.reset(OpAMD64MOVLatomicload) 7420 v.AuxInt = off1 + off2 7421 v.Aux = mergeSym(sym1, sym2) 7422 v.AddArg(ptr) 7423 v.AddArg(mem) 7424 return true 7425 } 7426 return false 7427 } 7428 func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool { 7429 b := v.Block 7430 _ = b 7431 // match: (MOVLf2i <t> (Arg <u> [off] {sym})) 7432 // cond: t.Size() == u.Size() 7433 // result: @b.Func.Entry (Arg <t> [off] {sym}) 7434 for { 7435 t := v.Type 7436 v_0 := v.Args[0] 7437 if v_0.Op != OpArg { 7438 break 7439 } 7440 u := v_0.Type 7441 off := v_0.AuxInt 7442 sym := v_0.Aux 7443 if !(t.Size() == u.Size()) { 7444 break 7445 } 7446 b = b.Func.Entry 7447 v0 := b.NewValue0(v.Pos, OpArg, t) 7448 v.reset(OpCopy) 7449 v.AddArg(v0) 7450 v0.AuxInt = off 7451 v0.Aux = sym 7452 return true 7453 } 7454 return false 7455 } 7456 func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool { 7457 b := v.Block 7458 _ = b 7459 // match: (MOVLi2f <t> (Arg <u> [off] {sym})) 7460 // cond: t.Size() == u.Size() 7461 // result: @b.Func.Entry (Arg <t> [off] {sym}) 7462 for { 7463 t := v.Type 7464 v_0 := v.Args[0] 7465 if v_0.Op != OpArg { 7466 break 7467 } 7468 u := v_0.Type 7469 off := v_0.AuxInt 7470 sym := v_0.Aux 7471 if !(t.Size() == u.Size()) { 7472 break 7473 } 7474 b = b.Func.Entry 7475 v0 := b.NewValue0(v.Pos, OpArg, t) 7476 v.reset(OpCopy) 7477 v.AddArg(v0) 7478 v0.AuxInt = off 7479 v0.Aux = sym 7480 return true 7481 } 7482 return false 7483 } 7484 func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { 7485 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) 7486 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 7487 // result: (MOVLQZX x) 7488 for { 7489 off := v.AuxInt 7490 sym := v.Aux 7491 _ = v.Args[1] 7492 ptr := v.Args[0] 7493 v_1 := v.Args[1] 7494 if v_1.Op != OpAMD64MOVLstore { 7495 break 7496 } 7497 off2 := v_1.AuxInt 7498 sym2 := v_1.Aux 7499 _ = v_1.Args[2] 7500 ptr2 := v_1.Args[0] 7501 x := v_1.Args[1] 7502 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 7503 break 7504 } 7505 v.reset(OpAMD64MOVLQZX) 7506 v.AddArg(x) 7507 return true 7508 } 7509 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) 7510 // cond: is32Bit(off1+off2) 7511 // result: (MOVLload [off1+off2] {sym} ptr mem) 7512 for { 7513 off1 := v.AuxInt 7514 sym := v.Aux 7515 _ = v.Args[1] 7516 v_0 := v.Args[0] 7517 if v_0.Op != OpAMD64ADDQconst { 7518 break 7519 } 7520 off2 := v_0.AuxInt 7521 ptr := v_0.Args[0] 7522 mem := v.Args[1] 7523 if !(is32Bit(off1 + off2)) { 7524 break 7525 } 7526 v.reset(OpAMD64MOVLload) 7527 v.AuxInt = off1 + off2 7528 v.Aux = sym 7529 v.AddArg(ptr) 7530 v.AddArg(mem) 7531 return true 7532 } 7533 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} 
base) mem) 7534 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7535 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7536 for { 7537 off1 := v.AuxInt 7538 sym1 := v.Aux 7539 _ = v.Args[1] 7540 v_0 := v.Args[0] 7541 if v_0.Op != OpAMD64LEAQ { 7542 break 7543 } 7544 off2 := v_0.AuxInt 7545 sym2 := v_0.Aux 7546 base := v_0.Args[0] 7547 mem := v.Args[1] 7548 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7549 break 7550 } 7551 v.reset(OpAMD64MOVLload) 7552 v.AuxInt = off1 + off2 7553 v.Aux = mergeSym(sym1, sym2) 7554 v.AddArg(base) 7555 v.AddArg(mem) 7556 return true 7557 } 7558 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 7559 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7560 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7561 for { 7562 off1 := v.AuxInt 7563 sym1 := v.Aux 7564 _ = v.Args[1] 7565 v_0 := v.Args[0] 7566 if v_0.Op != OpAMD64LEAQ1 { 7567 break 7568 } 7569 off2 := v_0.AuxInt 7570 sym2 := v_0.Aux 7571 _ = v_0.Args[1] 7572 ptr := v_0.Args[0] 7573 idx := v_0.Args[1] 7574 mem := v.Args[1] 7575 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7576 break 7577 } 7578 v.reset(OpAMD64MOVLloadidx1) 7579 v.AuxInt = off1 + off2 7580 v.Aux = mergeSym(sym1, sym2) 7581 v.AddArg(ptr) 7582 v.AddArg(idx) 7583 v.AddArg(mem) 7584 return true 7585 } 7586 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) 7587 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7588 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7589 for { 7590 off1 := v.AuxInt 7591 sym1 := v.Aux 7592 _ = v.Args[1] 7593 v_0 := v.Args[0] 7594 if v_0.Op != OpAMD64LEAQ4 { 7595 break 7596 } 7597 off2 := v_0.AuxInt 7598 sym2 := v_0.Aux 7599 _ = v_0.Args[1] 7600 ptr := v_0.Args[0] 7601 idx := v_0.Args[1] 7602 mem := v.Args[1] 7603 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7604 break 7605 } 7606 v.reset(OpAMD64MOVLloadidx4) 7607 v.AuxInt = off1 + off2 7608 v.Aux = mergeSym(sym1, sym2) 7609 v.AddArg(ptr) 7610 v.AddArg(idx) 7611 v.AddArg(mem) 7612 return true 7613 } 7614 // match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) 7615 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 7616 // result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 7617 for { 7618 off1 := v.AuxInt 7619 sym1 := v.Aux 7620 _ = v.Args[1] 7621 v_0 := v.Args[0] 7622 if v_0.Op != OpAMD64LEAQ8 { 7623 break 7624 } 7625 off2 := v_0.AuxInt 7626 sym2 := v_0.Aux 7627 _ = v_0.Args[1] 7628 ptr := v_0.Args[0] 7629 idx := v_0.Args[1] 7630 mem := v.Args[1] 7631 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 7632 break 7633 } 7634 v.reset(OpAMD64MOVLloadidx8) 7635 v.AuxInt = off1 + off2 7636 v.Aux = mergeSym(sym1, sym2) 7637 v.AddArg(ptr) 7638 v.AddArg(idx) 7639 v.AddArg(mem) 7640 return true 7641 } 7642 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) 7643 // cond: ptr.Op != OpSB 7644 // result: (MOVLloadidx1 [off] {sym} ptr idx mem) 7645 for { 7646 off := v.AuxInt 7647 sym := v.Aux 7648 _ = v.Args[1] 7649 v_0 := v.Args[0] 7650 if v_0.Op != OpAMD64ADDQ { 7651 break 7652 } 7653 _ = v_0.Args[1] 7654 ptr := v_0.Args[0] 7655 idx := v_0.Args[1] 7656 mem := v.Args[1] 7657 if !(ptr.Op != OpSB) { 7658 break 7659 } 7660 v.reset(OpAMD64MOVLloadidx1) 7661 v.AuxInt = off 7662 v.Aux = sym 7663 v.AddArg(ptr) 7664 v.AddArg(idx) 7665 v.AddArg(mem) 7666 return true 7667 } 7668 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) 7669 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) 7670 // 
result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) 7671 for { 7672 off1 := v.AuxInt 7673 sym1 := v.Aux 7674 _ = v.Args[1] 7675 v_0 := v.Args[0] 7676 if v_0.Op != OpAMD64LEAL { 7677 break 7678 } 7679 off2 := v_0.AuxInt 7680 sym2 := v_0.Aux 7681 base := v_0.Args[0] 7682 mem := v.Args[1] 7683 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { 7684 break 7685 } 7686 v.reset(OpAMD64MOVLload) 7687 v.AuxInt = off1 + off2 7688 v.Aux = mergeSym(sym1, sym2) 7689 v.AddArg(base) 7690 v.AddArg(mem) 7691 return true 7692 } 7693 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) 7694 // cond: is32Bit(off1+off2) 7695 // result: (MOVLload [off1+off2] {sym} ptr mem) 7696 for { 7697 off1 := v.AuxInt 7698 sym := v.Aux 7699 _ = v.Args[1] 7700 v_0 := v.Args[0] 7701 if v_0.Op != OpAMD64ADDLconst { 7702 break 7703 } 7704 off2 := v_0.AuxInt 7705 ptr := v_0.Args[0] 7706 mem := v.Args[1] 7707 if !(is32Bit(off1 + off2)) { 7708 break 7709 } 7710 v.reset(OpAMD64MOVLload) 7711 v.AuxInt = off1 + off2 7712 v.Aux = sym 7713 v.AddArg(ptr) 7714 v.AddArg(mem) 7715 return true 7716 } 7717 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) 7718 // cond: 7719 // result: (MOVLf2i val) 7720 for { 7721 off := v.AuxInt 7722 sym := v.Aux 7723 _ = v.Args[1] 7724 ptr := v.Args[0] 7725 v_1 := v.Args[1] 7726 if v_1.Op != OpAMD64MOVSSstore { 7727 break 7728 } 7729 if v_1.AuxInt != off { 7730 break 7731 } 7732 if v_1.Aux != sym { 7733 break 7734 } 7735 _ = v_1.Args[2] 7736 if ptr != v_1.Args[0] { 7737 break 7738 } 7739 val := v_1.Args[1] 7740 v.reset(OpAMD64MOVLf2i) 7741 v.AddArg(val) 7742 return true 7743 } 7744 return false 7745 } 7746 func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { 7747 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) 7748 // cond: 7749 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 7750 for { 7751 c := v.AuxInt 7752 sym := v.Aux 7753 _ = v.Args[2] 7754 ptr := v.Args[0] 7755 v_1 := v.Args[1] 7756 if v_1.Op != OpAMD64SHLQconst { 7757 break 7758 } 7759 if v_1.AuxInt != 2 { 7760 break 7761 } 7762 idx := v_1.Args[0] 7763 mem := v.Args[2] 7764 v.reset(OpAMD64MOVLloadidx4) 7765 v.AuxInt = c 7766 v.Aux = sym 7767 v.AddArg(ptr) 7768 v.AddArg(idx) 7769 v.AddArg(mem) 7770 return true 7771 } 7772 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem) 7773 // cond: 7774 // result: (MOVLloadidx4 [c] {sym} ptr idx mem) 7775 for { 7776 c := v.AuxInt 7777 sym := v.Aux 7778 _ = v.Args[2] 7779 v_0 := v.Args[0] 7780 if v_0.Op != OpAMD64SHLQconst { 7781 break 7782 } 7783 if v_0.AuxInt != 2 { 7784 break 7785 } 7786 idx := v_0.Args[0] 7787 ptr := v.Args[1] 7788 mem := v.Args[2] 7789 v.reset(OpAMD64MOVLloadidx4) 7790 v.AuxInt = c 7791 v.Aux = sym 7792 v.AddArg(ptr) 7793 v.AddArg(idx) 7794 v.AddArg(mem) 7795 return true 7796 } 7797 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 7798 // cond: 7799 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 7800 for { 7801 c := v.AuxInt 7802 sym := v.Aux 7803 _ = v.Args[2] 7804 ptr := v.Args[0] 7805 v_1 := v.Args[1] 7806 if v_1.Op != OpAMD64SHLQconst { 7807 break 7808 } 7809 if v_1.AuxInt != 3 { 7810 break 7811 } 7812 idx := v_1.Args[0] 7813 mem := v.Args[2] 7814 v.reset(OpAMD64MOVLloadidx8) 7815 v.AuxInt = c 7816 v.Aux = sym 7817 v.AddArg(ptr) 7818 v.AddArg(idx) 7819 v.AddArg(mem) 7820 return true 7821 } 7822 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 7823 // cond: 7824 // result: (MOVLloadidx8 [c] {sym} ptr idx mem) 7825 for { 7826 c := v.AuxInt 7827 sym := v.Aux 7828 _ = v.Args[2] 7829 
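// The indexed-load rules normalize the index operand: a left shift by 2 or 3
// on either argument of MOVLloadidx1 selects the scaled MOVLloadidx4/idx8
// form, and an ADDQconst on the pointer or index folds into the displacement.
// Roughly,
//	(MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
// is rewritten to
//	(MOVLloadidx4 [c] {sym} ptr idx mem)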
func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVLloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool {
	// match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVLloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVLloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
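// MOVLstore rules mirror the load rules: redundant sign/zero extensions of
// the stored value are dropped (a 32-bit store ignores the upper bits),
// constant stored values become MOVLstoreconst, and address arithmetic is
// folded into the store's offset, symbol, or indexed form.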
func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	return false
}
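// The _10 continuation merges adjacent 32-bit stores into one 64-bit store:
// either a store of bits 32-63 (via SHRQconst) paired with a store of the low
// half 4 bytes below, or two MOVLloads feeding two contiguous MOVLstores,
// which in effect collapse to
//	(MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)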
func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstore {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVLstore {
			break
		}
		if mem2.AuxInt != i-4 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVLload {
			break
		}
		if x2.AuxInt != j-4 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = j - 4
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
	// result: (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64ADDLconstmem)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
	// cond:
	// result: (MOVSSstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLf2i {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
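// For constant stores, AuxInt holds a ValAndOff, a packed (value, offset)
// pair, so the rules below go through ValAndOff(x).add(c) rather than plain
// addition, and the pair-merging rule rebuilds the combined 64-bit immediate
// as
//	ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32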
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(4*c)
	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(4 * c)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx4)
		v.AuxInt = ValAndOff(x).add(4 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVLstoreconstidx4 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 2
		v0.AddArg(i)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg(v1)
		v.AddArg(mem)
		return true
	}
	return false
}
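// Indexed 32-bit stores get the same normalization as indexed loads
// (shift-by-2/3 selects the idx4/idx8 form, ADDQconst folds into the
// displacement), plus the store-pair merge into MOVQstoreidx1.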
func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVLstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx1 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 32 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVLstoreidx4 {
			break
		}
		if x.AuxInt != i-4 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-32 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 2
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool {
	// match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
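// MOVOload/MOVOstore move 128-bit (oct) values; only the simple offset and
// symbol folds apply, since there are no indexed forms of these ops.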
func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool {
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool {
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool {
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
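// MOVQload rules include store-to-load forwarding: a load that immediately
// follows a same-sized store to the same address is replaced by the stored
// value itself, so that, in effect,
//	(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
// becomes just x when the addresses provably match (isSamePtr).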
func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQf2i val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVSDstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVQf2i)
		v.AddArg(val)
		return true
	}
	return false
}
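// As with the 32-bit forms, MOVQloadidx1 is upgraded to MOVQloadidx8 when
// its index is shifted left by 3, and constant displacements on either the
// pointer or the index operand are folded into AuxInt.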
v.AuxInt 9906 sym := v.Aux 9907 _ = v.Args[1] 9908 ptr := v.Args[0] 9909 v_1 := v.Args[1] 9910 if v_1.Op != OpAMD64MOVSDstore { 9911 break 9912 } 9913 if v_1.AuxInt != off { 9914 break 9915 } 9916 if v_1.Aux != sym { 9917 break 9918 } 9919 _ = v_1.Args[2] 9920 if ptr != v_1.Args[0] { 9921 break 9922 } 9923 val := v_1.Args[1] 9924 v.reset(OpAMD64MOVQf2i) 9925 v.AddArg(val) 9926 return true 9927 } 9928 return false 9929 } 9930 func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { 9931 // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) 9932 // cond: 9933 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 9934 for { 9935 c := v.AuxInt 9936 sym := v.Aux 9937 _ = v.Args[2] 9938 ptr := v.Args[0] 9939 v_1 := v.Args[1] 9940 if v_1.Op != OpAMD64SHLQconst { 9941 break 9942 } 9943 if v_1.AuxInt != 3 { 9944 break 9945 } 9946 idx := v_1.Args[0] 9947 mem := v.Args[2] 9948 v.reset(OpAMD64MOVQloadidx8) 9949 v.AuxInt = c 9950 v.Aux = sym 9951 v.AddArg(ptr) 9952 v.AddArg(idx) 9953 v.AddArg(mem) 9954 return true 9955 } 9956 // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) 9957 // cond: 9958 // result: (MOVQloadidx8 [c] {sym} ptr idx mem) 9959 for { 9960 c := v.AuxInt 9961 sym := v.Aux 9962 _ = v.Args[2] 9963 v_0 := v.Args[0] 9964 if v_0.Op != OpAMD64SHLQconst { 9965 break 9966 } 9967 if v_0.AuxInt != 3 { 9968 break 9969 } 9970 idx := v_0.Args[0] 9971 ptr := v.Args[1] 9972 mem := v.Args[2] 9973 v.reset(OpAMD64MOVQloadidx8) 9974 v.AuxInt = c 9975 v.Aux = sym 9976 v.AddArg(ptr) 9977 v.AddArg(idx) 9978 v.AddArg(mem) 9979 return true 9980 } 9981 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) 9982 // cond: is32Bit(c+d) 9983 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 9984 for { 9985 c := v.AuxInt 9986 sym := v.Aux 9987 _ = v.Args[2] 9988 v_0 := v.Args[0] 9989 if v_0.Op != OpAMD64ADDQconst { 9990 break 9991 } 9992 d := v_0.AuxInt 9993 ptr := v_0.Args[0] 9994 idx := v.Args[1] 9995 mem := v.Args[2] 9996 if !(is32Bit(c + d)) { 9997 break 9998 } 9999 v.reset(OpAMD64MOVQloadidx1) 10000 v.AuxInt = c + d 10001 v.Aux = sym 10002 v.AddArg(ptr) 10003 v.AddArg(idx) 10004 v.AddArg(mem) 10005 return true 10006 } 10007 // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) 10008 // cond: is32Bit(c+d) 10009 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 10010 for { 10011 c := v.AuxInt 10012 sym := v.Aux 10013 _ = v.Args[2] 10014 idx := v.Args[0] 10015 v_1 := v.Args[1] 10016 if v_1.Op != OpAMD64ADDQconst { 10017 break 10018 } 10019 d := v_1.AuxInt 10020 ptr := v_1.Args[0] 10021 mem := v.Args[2] 10022 if !(is32Bit(c + d)) { 10023 break 10024 } 10025 v.reset(OpAMD64MOVQloadidx1) 10026 v.AuxInt = c + d 10027 v.Aux = sym 10028 v.AddArg(ptr) 10029 v.AddArg(idx) 10030 v.AddArg(mem) 10031 return true 10032 } 10033 // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) 10034 // cond: is32Bit(c+d) 10035 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 10036 for { 10037 c := v.AuxInt 10038 sym := v.Aux 10039 _ = v.Args[2] 10040 ptr := v.Args[0] 10041 v_1 := v.Args[1] 10042 if v_1.Op != OpAMD64ADDQconst { 10043 break 10044 } 10045 d := v_1.AuxInt 10046 idx := v_1.Args[0] 10047 mem := v.Args[2] 10048 if !(is32Bit(c + d)) { 10049 break 10050 } 10051 v.reset(OpAMD64MOVQloadidx1) 10052 v.AuxInt = c + d 10053 v.Aux = sym 10054 v.AddArg(ptr) 10055 v.AddArg(idx) 10056 v.AddArg(mem) 10057 return true 10058 } 10059 // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) 10060 // cond: is32Bit(c+d) 10061 // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) 10062 for { 10063 c 
func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
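// In the scaled form the two folds differ: a constant added to the base
// pointer shifts the displacement by d, while a constant added to the
// index is multiplied by the element size first, hence the c+8*d in the
// guard and the result. For example,
//	(MOVQloadidx8 [0] {s} p (ADDQconst [2] i) mem)
// becomes
//	(MOVQloadidx8 [16] {s} p i mem)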
func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c,off)
	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
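	// A store of a constant becomes MOVQstoreconst, which packs the value
	// and the offset together into AuxInt as a ValAndOff; the two live in
	// separate 32-bit halves of the single 64-bit AuxInt, which is why the
	// validValAndOff guard admits only pairs where each part fits in 32
	// bits on its own.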
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
	// result: (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		a := v.Args[1]
		if a.Op != OpAMD64ADDQconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload {
			break
		}
		if l.AuxInt != off {
			break
		}
		if l.Aux != sym {
			break
		}
		_ = l.Args[1]
		ptr2 := l.Args[0]
		mem := l.Args[1]
		if mem != v.Args[2] {
			break
		}
		if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
			break
		}
		v.reset(OpAMD64ADDQconstmem)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
	// cond:
	// result: (MOVSDstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQf2i {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
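// Two of the MOVQstore rules above do more than address arithmetic. The
// ADDQconstmem rule fuses a load-add-store sequence on one address into a
// single read-modify-write add with a memory operand; that is only safe
// when the intermediate values have no other uses (a.Uses == 1 &&
// l.Uses == 1) and the store consumes the very memory state the load saw.
// Illustratively, something like *p += 5 can reach this rule as
//	(MOVQstore [0] p (ADDQconst [5] (MOVQload [0] p mem)) mem)
// and land on (ADDQconstmem [makeValAndOff(5,0)] p mem). The MOVQf2i rule
// cancels a GP<->XMM crossing: storing 64 integer bits that just came out
// of a floating-point register becomes a plain MOVSDstore from the
// original register.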
func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
	// cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)
	// result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c2 := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = ValAndOff(c2).Off()
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
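	// The rule above merges two adjacent zeroing stores into one 16-byte
	// SSE store: an 8-byte zero at offset c2 followed in the same memory
	// chain by an 8-byte zero at c2+8 becomes a single MOVOstore of a
	// zero 128-bit constant at c2. It fires only when SSE is enabled and
	// the inner store has no other uses, so the pair can be clobbered
	// safely.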
	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
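// The LEAL and ADDLconst variants mirror the 64-bit LEAQ/ADDQconst rules
// so that address arithmetic done in 32-bit registers folds the same way;
// here ValAndOff(sc).canAdd plays the role that is32Bit plays elsewhere,
// checking that the adjusted offset still fits its 32-bit field.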
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(8*c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(8 * c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconstidx8)
		v.AuxInt = ValAndOff(x).add(8 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVQstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool {
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	return false
}
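// The final MOVSDload rule is store-to-load forwarding across register
// classes: a float load that reads back exactly what a MOVQstore just
// wrote to the same [off] {sym} address does not need to touch memory at
// all; it becomes a MOVQi2f of the stored value. The mirror-image rule on
// MOVQload (rewriting a reload of a MOVSDstore to MOVQf2i) appears near
// the top of this section.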
func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
	// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDloadidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool {
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// cond:
	// result: (MOVQstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
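// The MOVSDstore(MOVQi2f) rule is the dual of the forwarding rule above:
// if the value being stored as a float only just arrived from an integer
// register, the cross-register move is dropped and the bits are stored
// with a plain MOVQstore instead.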
func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
	// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 8*d)) {
			break
		}
		v.reset(OpAMD64MOVSDstoreidx8)
		v.AuxInt = c + 8*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool {
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVLi2f val)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		if v_1.AuxInt != off {
			break
		}
		if v_1.Aux != sym {
			break
		}
		_ = v_1.Args[2]
		if ptr != v_1.Args[0] {
			break
		}
		val := v_1.Args[1]
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
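// The MOVSS rules are the 4-byte counterparts of the MOVSD rules: the
// scaled index form is idx4, fed by LEAQ4 or SHLQconst [2]; index-side
// constants are scaled by 4 (c+4*d); and store-to-load forwarding pairs
// MOVSSload with MOVLstore through MOVLi2f.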
func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
	// cond:
	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSloadidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool {
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// cond:
	// result: (MOVLstore [off] {sym} ptr val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
	// cond:
	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 4*d)) {
			break
		}
		v.reset(OpAMD64MOVSSstoreidx4)
		v.AuxInt = c + 4*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c & 0x7fff
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// cond:
	// result: (MOVWQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// cond:
	// result: (MOVBQSX x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool {
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
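// MOVWQSX folds a sign extension into the load that feeds it: a load of
// any width with a single use is replaced, in the load's own block (hence
// the @x.Block form and the OpCopy), by a combined sign-extending
// MOVWQSXload. The ANDLconst rule leans on the mask: when c&0x8000 == 0,
// bit 15 of the masked value is already zero, so sign extension is a
// no-op and the mask can simply be narrowed to c&0x7fff. The last two
// rules drop extensions already implied by an inner MOVWQSX or MOVBQSX.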
func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[1]
		ptr := x.Args[0]
		mem := x.Args[1]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx1 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
	for {
		x := v.Args[0]
		if x.Op != OpAMD64MOVWloadidx2 {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		_ = x.Args[2]
		ptr := x.Args[0]
		idx := x.Args[1]
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg(ptr)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
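	// Unlike the sign-extending case, zero extension needs no dedicated
	// extending load here: a plain MOVWload (or MOVWloadidx1/MOVWloadidx2)
	// already leaves the upper bits clear, so the rules above replace
	// MOVWQZX-of-load with the 16-bit load itself.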
OpAMD64ANDLconst { 12458 break 12459 } 12460 c := v_0.AuxInt 12461 x := v_0.Args[0] 12462 v.reset(OpAMD64ANDLconst) 12463 v.AuxInt = c & 0xffff 12464 v.AddArg(x) 12465 return true 12466 } 12467 // match: (MOVWQZX (MOVWQZX x)) 12468 // cond: 12469 // result: (MOVWQZX x) 12470 for { 12471 v_0 := v.Args[0] 12472 if v_0.Op != OpAMD64MOVWQZX { 12473 break 12474 } 12475 x := v_0.Args[0] 12476 v.reset(OpAMD64MOVWQZX) 12477 v.AddArg(x) 12478 return true 12479 } 12480 // match: (MOVWQZX (MOVBQZX x)) 12481 // cond: 12482 // result: (MOVBQZX x) 12483 for { 12484 v_0 := v.Args[0] 12485 if v_0.Op != OpAMD64MOVBQZX { 12486 break 12487 } 12488 x := v_0.Args[0] 12489 v.reset(OpAMD64MOVBQZX) 12490 v.AddArg(x) 12491 return true 12492 } 12493 return false 12494 } 12495 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { 12496 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) 12497 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) 12498 // result: (MOVWQZX x) 12499 for { 12500 off := v.AuxInt 12501 sym := v.Aux 12502 _ = v.Args[1] 12503 ptr := v.Args[0] 12504 v_1 := v.Args[1] 12505 if v_1.Op != OpAMD64MOVWstore { 12506 break 12507 } 12508 off2 := v_1.AuxInt 12509 sym2 := v_1.Aux 12510 _ = v_1.Args[2] 12511 ptr2 := v_1.Args[0] 12512 x := v_1.Args[1] 12513 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 12514 break 12515 } 12516 v.reset(OpAMD64MOVWQZX) 12517 v.AddArg(x) 12518 return true 12519 } 12520 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) 12521 // cond: is32Bit(off1+off2) 12522 // result: (MOVWload [off1+off2] {sym} ptr mem) 12523 for { 12524 off1 := v.AuxInt 12525 sym := v.Aux 12526 _ = v.Args[1] 12527 v_0 := v.Args[0] 12528 if v_0.Op != OpAMD64ADDQconst { 12529 break 12530 } 12531 off2 := v_0.AuxInt 12532 ptr := v_0.Args[0] 12533 mem := v.Args[1] 12534 if !(is32Bit(off1 + off2)) { 12535 break 12536 } 12537 v.reset(OpAMD64MOVWload) 12538 v.AuxInt = off1 + off2 12539 v.Aux = sym 12540 v.AddArg(ptr) 12541 v.AddArg(mem) 12542 return true 12543 } 12544 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) 12545 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12546 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) 12547 for { 12548 off1 := v.AuxInt 12549 sym1 := v.Aux 12550 _ = v.Args[1] 12551 v_0 := v.Args[0] 12552 if v_0.Op != OpAMD64LEAQ { 12553 break 12554 } 12555 off2 := v_0.AuxInt 12556 sym2 := v_0.Aux 12557 base := v_0.Args[0] 12558 mem := v.Args[1] 12559 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12560 break 12561 } 12562 v.reset(OpAMD64MOVWload) 12563 v.AuxInt = off1 + off2 12564 v.Aux = mergeSym(sym1, sym2) 12565 v.AddArg(base) 12566 v.AddArg(mem) 12567 return true 12568 } 12569 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) 12570 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 12571 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) 12572 for { 12573 off1 := v.AuxInt 12574 sym1 := v.Aux 12575 _ = v.Args[1] 12576 v_0 := v.Args[0] 12577 if v_0.Op != OpAMD64LEAQ1 { 12578 break 12579 } 12580 off2 := v_0.AuxInt 12581 sym2 := v_0.Aux 12582 _ = v_0.Args[1] 12583 ptr := v_0.Args[0] 12584 idx := v_0.Args[1] 12585 mem := v.Args[1] 12586 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { 12587 break 12588 } 12589 v.reset(OpAMD64MOVWloadidx1) 12590 v.AuxInt = off1 + off2 12591 v.Aux = mergeSym(sym1, sym2) 12592 v.AddArg(ptr) 12593 v.AddArg(idx) 12594 v.AddArg(mem) 12595 return true 12596 } 12597 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} 
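// Illustration (a sketch with made-up constants): zero-extension of a small
// constant mask needs no MOVWQZX at all, since the mask already clears the
// high bits. For example:
//
//	(MOVWQZX (ANDLconst [0x12345] x)) -> (ANDLconst [0x2345] x)
//
// because 0x12345 & 0xffff == 0x2345, and a 32-bit ANDL result is already
// zero-extended to 64 bits by the machine. The sign-extending MOVWQSX
// variant earlier instead requires c&0x8000 == 0, so that bit 15 (the sign
// bit of the 16-bit value) is known to be clear.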
func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		_ = v_1.Args[2]
		ptr2 := v_1.Args[0]
		x := v_1.Args[1]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
	// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem)
	// cond:
	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		idx := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		ptr := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		idx := v_0.Args[0]
		ptr := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
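// Illustration (hypothetical numbers): the offset-folding rules above push
// an ADDQconst into the load's displacement only when the sum still fits in
// a signed 32-bit displacement, which is what is32Bit guards:
//
//	(MOVWloadidx1 [100] {s} (ADDQconst [24] ptr) idx mem)
//	-> (MOVWloadidx1 [124] {s} ptr idx mem)      // is32Bit(100+24)
//
// The commuted variants (ptr and idx swapped) exist because idx1 addressing
// is symmetric in its two registers; the scaled idx2 form below is not, so
// only a ptr-side ADDQconst folds at scale 1, while an idx-side constant is
// scaled by 2 (c+2*d).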
func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
	// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+2*d)
	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(is32Bit(c + 2*d)) {
			break
		}
		v.reset(OpAMD64MOVWloadidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// cond:
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v.Args[2]
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(int64(int16(c)), off)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		val := v.Args[1]
		mem := v.Args[2]
		if !(ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		if v_1.AuxInt != 16 {
			break
		}
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if w != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstore {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
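// Illustration (a sketch with made-up offsets): the last two rules above
// combine adjacent 16-bit stores of one value's halves into a single 32-bit
// store, relying on little-endian layout. For i = 6:
//
//	(MOVWstore [6] {s} p (SHRQconst [16] w)
//	    x:(MOVWstore [4] {s} p w mem))
//	-> (MOVLstore [4] {s} p w mem)
//
// The low half of w lands at offset 4 and bits 16-31 at offset 6, exactly
// the bytes a 32-bit store of w at offset 4 writes. x.Uses == 1 ensures the
// narrower store has no other consumer before it is clobbered.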
func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
	// result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		_ = x1.Args[1]
		p2 := x1.Args[0]
		mem := x1.Args[1]
		mem2 := v.Args[2]
		if mem2.Op != OpAMD64MOVWstore {
			break
		}
		if mem2.AuxInt != i-2 {
			break
		}
		if mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVWload {
			break
		}
		if x2.AuxInt != j-2 {
			break
		}
		if x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] {
			break
		}
		if mem != x2.Args[1] {
			break
		}
		if mem != mem2.Args[2] {
			break
		}
		if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = j - 2
		v0.Aux = s2
		v0.AddArg(p2)
		v0.AddArg(mem)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool {
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
	for {
		x := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		ptr := v_0.Args[0]
		idx := v_0.Args[1]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = x
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		p := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[1]
		if p != x.Args[0] {
			break
		}
		mem := x.Args[1]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	for {
		sc := v.AuxInt
		s := v.Aux
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v.Args[1]
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
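// Illustration (hypothetical values): a ValAndOff packs a 32-bit immediate
// and a 32-bit offset into one int64 AuxInt, so the constant-store merge
// above can splice two 16-bit immediates into one 32-bit one:
//
//	(MOVWstoreconst [makeValAndOff(0x2222, 2)] {s} p
//	    x:(MOVWstoreconst [makeValAndOff(0x1111, 0)] {s} p mem))
//	-> (MOVLstoreconst [makeValAndOff(0x22221111, 0)] {s} p mem)
//
// since ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16 places the store
// at the lower offset with the lower-address half in the low 16 bits.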
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
	// cond:
	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx1)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx1 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v.AddArg(i)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(2*c)
	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
	for {
		x := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		idx := v_1.Args[0]
		mem := v.Args[2]
		if !(ValAndOff(x).canAdd(2 * c)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconstidx2)
		v.AuxInt = ValAndOff(x).add(2 * c)
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
	for {
		c := v.AuxInt
		s := v.Aux
		_ = v.Args[2]
		p := v.Args[0]
		i := v.Args[1]
		x := v.Args[2]
		if x.Op != OpAMD64MOVWstoreconstidx2 {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		_ = x.Args[2]
		if p != x.Args[0] {
			break
		}
		if i != x.Args[1] {
			break
		}
		mem := x.Args[2]
		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconstidx1)
		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
		v0.AuxInt = 1
		v0.AddArg(i)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
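// Illustration (a sketch): the idx2 pair-merge above targets the 1-scaled
// MOVLstoreconstidx1 rather than a 2-scaled 32-bit form, apparently because
// no such op exists, so the rewrite folds the scale into an explicit shift:
//
//	(MOVWstoreconstidx2 [...] {s} p i ...)
//	-> (MOVLstoreconstidx1 [...] {s} p (SHLQconst <i.Type> [1] i) mem)
//
// i<<1 reproduces the doubling that the idx2 addressing mode performed.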
func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
	// cond:
	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx1)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx1 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v.AddArg(idx)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		ptr := v_0.Args[0]
		idx := v.Args[1]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+2*d)
	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
	for {
		c := v.AuxInt
		sym := v.Aux
		_ = v.Args[3]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := v_1.AuxInt
		idx := v_1.Args[0]
		val := v.Args[2]
		mem := v.Args[3]
		if !(is32Bit(c + 2*d)) {
			break
		}
		v.reset(OpAMD64MOVWstoreidx2)
		v.AuxInt = c + 2*d
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(idx)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		if v_2.AuxInt != 16 {
			break
		}
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		if w != x.Args[2] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w)
		v.AddArg(mem)
		return true
	}
	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
	for {
		i := v.AuxInt
		s := v.Aux
		_ = v.Args[3]
		p := v.Args[0]
		idx := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64SHRQconst {
			break
		}
		j := v_2.AuxInt
		w := v_2.Args[0]
		x := v.Args[3]
		if x.Op != OpAMD64MOVWstoreidx2 {
			break
		}
		if x.AuxInt != i-2 {
			break
		}
		if x.Aux != s {
			break
		}
		_ = x.Args[3]
		if p != x.Args[0] {
			break
		}
		if idx != x.Args[1] {
			break
		}
		w0 := x.Args[2]
		if w0.Op != OpAMD64SHRQconst {
			break
		}
		if w0.AuxInt != j-16 {
			break
		}
		if w != w0.Args[0] {
			break
		}
		mem := x.Args[3]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreidx1)
		v.AuxInt = i - 2
		v.Aux = s
		v.AddArg(p)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
		v0.AuxInt = 1
		v0.AddArg(idx)
		v.AddArg(v0)
		v.AddArg(w0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool {
	// match: (MULL x (MOVLconst [c]))
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULL (MOVLconst [c]) x)
	// cond:
	// result: (MULLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
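// Illustration (a sketch): once a multiply by a constant has been
// canonicalized to MULLconst/MULQconst by the rules above, the rules that
// follow constant-fold and strength-reduce it. Folding composes first,
// e.g. with made-up constants:
//
//	(MULLconst [6] (MULLconst [7] x)) -> (MULLconst [42] x)
//
// with the product passed through int64(int32(c*d)) to keep 32-bit
// wraparound semantics.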
func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool {
	// match: (MULLconst [c] (MULLconst [d] x))
	// cond:
	// result: (MULLconst [int64(int32(c * d))] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int64(int32(c * d))
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [int64(int32(c*d))])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c * d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool {
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (MULQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (MULQconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(c*d)
	// result: (MULQconst [c * d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c * d)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = c * d
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [-1] x)
	// cond:
	// result: (NEGQ x)
	for {
		if v.AuxInt != -1 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [0] _)
	// cond:
	// result: (MOVQconst [0])
	for {
		if v.AuxInt != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (MULQconst [1] x)
	// cond:
	// result: x
	for {
		if v.AuxInt != 1 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [3] x)
	// cond:
	// result: (LEAQ2 x x)
	for {
		if v.AuxInt != 3 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [5] x)
	// cond:
	// result: (LEAQ4 x x)
	for {
		if v.AuxInt != 5 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [7] x)
	// cond:
	// result: (LEAQ8 (NEGQ <v.Type> x) x)
	for {
		if v.AuxInt != 7 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [9] x)
	// cond:
	// result: (LEAQ8 x x)
	for {
		if v.AuxInt != 9 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [11] x)
	// cond:
	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 11 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ2)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [13] x)
	// cond:
	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 13 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [21] x)
	// cond:
	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 21 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [25] x)
	// cond:
	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
	for {
		if v.AuxInt != 25 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [37] x)
	// cond:
	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 37 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [41] x)
	// cond:
	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
	for {
		if v.AuxInt != 41 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [73] x)
	// cond:
	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
	for {
		if v.AuxInt != 73 {
			break
		}
		x := v.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c+1) && c >= 15
	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c + 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 1)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 2)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 4)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isPowerOfTwo(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = log2(c - 8)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool {
	b := v.Block
	_ = b
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo(c/3)
	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 3)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo(c/5)
	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 5)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo(c/9)
	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = log2(c / 9)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg(x)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [c*d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c * d
		return true
	}
	return false
}
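// Illustration (worked with hypothetical constants): LEA computes x + scale*y
// in one instruction, which is what the MULQconst rules above exploit.
//
//	(MULQconst [21] x) -> (LEAQ4 x (LEAQ4 <v.Type> x x))
//	    inner: x + 4*x = 5x;  outer: x + 4*(5x) = 21x
//
//	(MULQconst [40] x) -> (SHLQconst [3] (LEAQ4 <v.Type> x x))
//	    40 = 5*8: 5x by LEAQ4, then <<3 since isPowerOfTwo(40/5)
//
// The c >= 15 style bounds on the shift-based forms keep the small
// constants on the earlier, cheaper single-LEA patterns.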
func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool {
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSDmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSDmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// cond:
	// result: (MULSD x (MOVQi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVQstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool {
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
	// cond: canMergeLoad(v, l, x) && clobber(l)
	// result: (MULSSmem x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		_ = l.Args[1]
		ptr := l.Args[0]
		mem := l.Args[1]
		x := v.Args[1]
		if !(canMergeLoad(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64MULSSmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// cond:
	// result: (MULSS x (MOVLi2f y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVLstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
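// Illustration (a rough sketch): canMergeLoad checks, roughly, that the
// one-use load can be absorbed by the arithmetic op without reordering any
// memory operations, turning a load+multiply pair into one memory-operand
// instruction:
//
//	MOVSD 8(ptr), X1; MULSD X1, X0   =>   MULSD 8(ptr), X0
//
// MULSDmem then has its own forwarding rule: if the loaded slot was just
// written by a MOVQstore of integer bits y, the memory operand is undone
// into (MULSD x (MOVQi2f y)), moving y to the FP register bank directly
// instead of bouncing through memory.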
func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool {
	// match: (NEGL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [int64(int32(-c))])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(-c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool {
	// match: (NEGQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [-c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -c
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_0.AuxInt
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = -c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool {
	// match: (NOTL (MOVLconst [c]))
	// cond:
	// result: (MOVLconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool {
	// match: (NOTQ (MOVQconst [c]))
	// cond:
	// result: (MOVQconst [^c])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = ^c
		return true
	}
	return false
}
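// Illustration (hypothetical shift counts): the ORL rules below recognize
// rotate idioms. The constant form matches a pair of opposite shifts whose
// counts sum to the register width:
//
//	(ORL (SHLLconst x [24]) (SHRLconst x [8])) -> (ROLLconst x [24])
//
// since d == 32-c. The 16- and 8-bit variants additionally check t.Size()
// so a word- or byte-typed OR uses ROLW/ROLB, and the long variable-count
// patterns match the masked-shift expansion produced for a rotate by y
// before collapsing it back to a single ROLL.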
func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool {
	// match: (ORL x (MOVLconst [c]))
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (MOVLconst [c]) x)
	// cond:
	// result: (ORLconst [c] x)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
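	// Editorial note (added): the shift-pair rules above turn constant
	// shift-and-or idioms into rotates: x<<c | x>>(32-c) is a 32-bit
	// rotate left by c (ROLLconst), with 16- and 8-bit variants that also
	// require the result type to be 2 or 1 bytes and c to be in range.
	// Both operand orders are matched because ORL is commutative. A
	// minimal source shape that should reduce to ROLLconst [7]:
	//
	//	func rot7(x uint32) uint32 { return x<<7 | x>>25 }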
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
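// Editorial note (added): the two cases above, and ORL_10 below,
// recognize the masked variable-rotate expansion for 32-bit values: the
// right-shift half is ANDed with an SBBLcarrymask whose
// CMPQconst/NEGQ/ADDQconst/ANDQconst chain computes 32-(y&31) and
// compares it against 32, zeroing that half when the count is a multiple
// of 32 so the OR still yields a correct rotate; the whole tree then
// collapses to a single ROLL. The many near-identical cases only permute
// commutative operands and the Q/L flavor of the count arithmetic.
// Roughly (a sketch of the source idiom, relying on Go's precedence
// where 32-s&31 means 32-(s&31)):
//
//	func rotl(x uint32, s uint) uint32 { return x<<(s&31) | x>>(32-s&31) }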
func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
	// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
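// Editorial note (added): ORL_20 finishes the RORL family with 32-bit
// (L-flavored) count arithmetic and then begins the 16-bit ROLW
// patterns, where the count is pre-masked with [15], the bias constant
// is [-16], the compare is against [16], and v.Type.Size() == 2 must
// hold so only true 16-bit rotates match.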
func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
	// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_0_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 32 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -32 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 32 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -32 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_1_1_1.Args[0] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		y := v_0_0_1.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 32 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -32 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 31 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHLL x (NEGL y))) (SHRL x y))
	// cond:
	// result: (RORL x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 32 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -32 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 31 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		if y != v_0_1_1.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		if y != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64RORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -16 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 16 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -16 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
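// Editorial note (added): ORL_30 completes the 16-bit family: the last
// ROLW permutations, then the RORW shapes, which carry no SBBLcarrymask
// because the left-shift half self-zeroes the low 16 bits when its count
// reaches 16, and finally the first 8-bit ROLB patterns ([7]/[-8]/[8]
// constants, guarded by v.Type.Size() == 1).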
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
	// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -16 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 16 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -16 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 15 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -16 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 15 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -16 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 15 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
	// cond: v.Type.Size() == 2
	// result: (RORW x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -16 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 15 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRW {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 15 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 2) {
			break
		}
		v.reset(OpAMD64RORW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
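// Editorial note (added): ORL_40 repeats the 8-bit ROLB recognition with
// 32-bit (L-flavored) count arithmetic, again in every commutative
// permutation. A rough source-level shape that should collapse to a
// single ROLB (the same masked-rotate idiom as above, with 7/8 in place
// of 31/32, relying on precedence where 8-s&7 means 8-(s&7)):
//
//	func rotb(x uint8, s uint) uint8 { return x<<(s&7) | x>>(8-s&7) }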
func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_0.Args[1]
		if x != v_1_0.Args[0] {
			break
		}
		v_1_0_1 := v_1_0.Args[1]
		if v_1_0_1.Op != OpAMD64NEGL {
			break
		}
		v_1_0_1_0 := v_1_0_1.Args[0]
		if v_1_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_1_0.AuxInt != -8 {
			break
		}
		v_1_0_1_0_0 := v_1_0_1_0.Args[0]
		if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_1_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_1_0.AuxInt != 8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0_0_0 := v_1_1_0_0.Args[0]
		if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0_0_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
		if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_1_0_0.AuxInt != 8 {
			break
		}
		v_1_0_0_0 := v_1_0_0.Args[0]
		if v_1_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_1_0_0_0_0 := v_1_0_0_0.Args[0]
		if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_0_0_0_0.AuxInt != -8 {
			break
		}
		v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
		if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_0_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_0_0_0_0_0.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1_1.Args[1]
		if x != v_1_1.Args[0] {
			break
		}
		v_1_1_1 := v_1_1.Args[1]
		if v_1_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_1_0 := v_1_1_1.Args[0]
		if v_1_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_1_0_0 := v_1_1_1_0.Args[0]
		if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_0.Args[1]
		x := v_0_0.Args[0]
		v_0_0_1 := v_0_0.Args[1]
		if v_0_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_0_1_0 := v_0_0_1.Args[0]
		if v_0_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_1_0.AuxInt != -8 {
			break
		}
		v_0_0_1_0_0 := v_0_0_1_0.Args[0]
		if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_1_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_1_0.AuxInt != 8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0_0_0 := v_0_1_0_0.Args[0]
		if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0_0_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
		if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_0_0_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (ROLB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ANDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SBBLcarrymask {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0_0_0.AuxInt != 8 {
			break
		}
		v_0_0_0_0 := v_0_0_0.Args[0]
		if v_0_0_0_0.Op != OpAMD64NEGL {
			break
		}
		v_0_0_0_0_0 := v_0_0_0_0.Args[0]
		if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_0_0_0_0.AuxInt != -8 {
			break
		}
		v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
		if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_0_0_0_0_0.AuxInt != 7 {
			break
		}
		y := v_0_0_0_0_0_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_0_1.Args[1]
		x := v_0_1.Args[0]
		v_0_1_1 := v_0_1.Args[1]
		if v_0_1_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_1_0 := v_0_1_1.Args[0]
		if v_0_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_1_0.AuxInt != -8 {
			break
		}
		v_0_1_1_0_0 := v_0_1_1_0.Args[0]
		if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_0_1_1_0_0.Args[0] {
			break
		}
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGQ {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGQ {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDQconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDQconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDQconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRB {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1.AuxInt != 7 {
			break
		}
		y := v_0_1.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64NEGL {
			break
		}
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_1_1_0.AuxInt != -8 {
			break
		}
		v_1_1_0_0 := v_1_1_0.Args[0]
		if v_1_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1_0_0.AuxInt != 7 {
			break
		}
		if y != v_1_1_0_0.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7])))
	// cond: v.Type.Size() == 1
	// result: (RORB x y)
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64NEGL {
			break
		}
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64ADDLconst {
			break
		}
		if v_0_1_0.AuxInt != -8 {
			break
		}
		v_0_1_0_0 := v_0_1_0.Args[0]
		if v_0_1_0_0.Op != OpAMD64ANDLconst {
			break
		}
		if v_0_1_0_0.AuxInt != 7 {
			break
		}
		y := v_0_1_0_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRB {
			break
		}
		_ = v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64ANDLconst {
			break
		}
		if v_1_1.AuxInt != 7 {
			break
		}
		if y != v_1_1.Args[0] {
			break
		}
		if !(v.Type.Size() == 1) {
			break
		}
		v.reset(OpAMD64RORB)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (ORL x x)
	// cond:
	// result: x
	for {
		_ = v.Args[1]
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
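	// The load-merging rules from here on combine adjacent narrow loads
	// into one wider load (little-endian order). A sketch of source that
	// is intended to end up as a single MOVWload (names illustrative):
	//
	//	u := uint16(b[0]) | uint16(b[1])<<8
	//
	// The Uses == 1 requirements ensure the narrow loads have no other
	// consumers, so deleting them after the merge is safe.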
	// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	return false
}
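// mergePoint and clobber are helpers from rewrite.go, not defined in
// this generated file: mergePoint(b, x0, x1) selects a block where a
// value combining x0 and x1 may be placed, returning nil when there is
// none, and clobber invalidates the matched value and always returns
// true, which is why it can sit inside the condition expression.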
func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
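// ORL is commutative, and a 1-scaled index makes p and idx
// interchangeable in MOVBloadidx1, so the rule generator emits a
// near-identical match for every operand ordering. The duplicates let
// the rewrite fire regardless of how earlier passes ordered the
// arguments; rewriteValueAMD64_OpAMD64ORL_60 continues the same set.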
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
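// The MOVWloadidx1 pairs follow the same scheme one size up: two
// adjacent 16-bit indexed loads, the upper shifted left by 16, merge
// into a single MOVLloadidx1. After the byte-level rules have run, a
// four-byte little-endian read such as (names illustrative)
//
//	u := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
//
// can therefore collapse all the way to one 32-bit load.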
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
19859 break 19860 } 19861 j1 := s1.AuxInt 19862 x1 := s1.Args[0] 19863 if x1.Op != OpAMD64MOVBloadidx1 { 19864 break 19865 } 19866 i1 := x1.AuxInt 19867 s := x1.Aux 19868 _ = x1.Args[2] 19869 idx := x1.Args[0] 19870 p := x1.Args[1] 19871 mem := x1.Args[2] 19872 or := v.Args[1] 19873 if or.Op != OpAMD64ORL { 19874 break 19875 } 19876 _ = or.Args[1] 19877 y := or.Args[0] 19878 s0 := or.Args[1] 19879 if s0.Op != OpAMD64SHLLconst { 19880 break 19881 } 19882 j0 := s0.AuxInt 19883 x0 := s0.Args[0] 19884 if x0.Op != OpAMD64MOVBloadidx1 { 19885 break 19886 } 19887 i0 := x0.AuxInt 19888 if x0.Aux != s { 19889 break 19890 } 19891 _ = x0.Args[2] 19892 if p != x0.Args[0] { 19893 break 19894 } 19895 if idx != x0.Args[1] { 19896 break 19897 } 19898 if mem != x0.Args[2] { 19899 break 19900 } 19901 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19902 break 19903 } 19904 b = mergePoint(b, x0, x1) 19905 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19906 v.reset(OpCopy) 19907 v.AddArg(v0) 19908 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19909 v1.AuxInt = j0 19910 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19911 v2.AuxInt = i0 19912 v2.Aux = s 19913 v2.AddArg(p) 19914 v2.AddArg(idx) 19915 v2.AddArg(mem) 19916 v1.AddArg(v2) 19917 v0.AddArg(v1) 19918 v0.AddArg(y) 19919 return true 19920 } 19921 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 19922 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19923 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 19924 for { 19925 _ = v.Args[1] 19926 s1 := v.Args[0] 19927 if s1.Op != OpAMD64SHLLconst { 19928 break 19929 } 19930 j1 := s1.AuxInt 19931 x1 := s1.Args[0] 19932 if x1.Op != OpAMD64MOVBloadidx1 { 19933 break 19934 } 19935 i1 := x1.AuxInt 19936 s := x1.Aux 19937 _ = x1.Args[2] 19938 p := x1.Args[0] 19939 idx := x1.Args[1] 19940 mem := x1.Args[2] 19941 or := v.Args[1] 19942 if or.Op != OpAMD64ORL { 19943 break 19944 } 19945 _ = or.Args[1] 19946 y := or.Args[0] 19947 s0 := or.Args[1] 19948 if s0.Op != OpAMD64SHLLconst { 19949 break 19950 } 19951 j0 := s0.AuxInt 19952 x0 := s0.Args[0] 19953 if x0.Op != OpAMD64MOVBloadidx1 { 19954 break 19955 } 19956 i0 := x0.AuxInt 19957 if x0.Aux != s { 19958 break 19959 } 19960 _ = x0.Args[2] 19961 if idx != x0.Args[0] { 19962 break 19963 } 19964 if p != x0.Args[1] { 19965 break 19966 } 19967 if mem != x0.Args[2] { 19968 break 19969 } 19970 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 19971 break 19972 } 19973 b = mergePoint(b, x0, x1) 19974 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 19975 v.reset(OpCopy) 19976 v.AddArg(v0) 19977 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 19978 v1.AuxInt = j0 19979 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 19980 v2.AuxInt = i0 19981 v2.Aux = s 19982 v2.AddArg(p) 19983 v2.AddArg(idx) 19984 v2.AddArg(mem) 19985 v1.AddArg(v2) 19986 v0.AddArg(v1) 19987 
v0.AddArg(y) 19988 return true 19989 } 19990 return false 19991 } 19992 func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool { 19993 b := v.Block 19994 _ = b 19995 typ := &b.Func.Config.Types 19996 _ = typ 19997 // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) 19998 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 19999 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 20000 for { 20001 _ = v.Args[1] 20002 s1 := v.Args[0] 20003 if s1.Op != OpAMD64SHLLconst { 20004 break 20005 } 20006 j1 := s1.AuxInt 20007 x1 := s1.Args[0] 20008 if x1.Op != OpAMD64MOVBloadidx1 { 20009 break 20010 } 20011 i1 := x1.AuxInt 20012 s := x1.Aux 20013 _ = x1.Args[2] 20014 idx := x1.Args[0] 20015 p := x1.Args[1] 20016 mem := x1.Args[2] 20017 or := v.Args[1] 20018 if or.Op != OpAMD64ORL { 20019 break 20020 } 20021 _ = or.Args[1] 20022 y := or.Args[0] 20023 s0 := or.Args[1] 20024 if s0.Op != OpAMD64SHLLconst { 20025 break 20026 } 20027 j0 := s0.AuxInt 20028 x0 := s0.Args[0] 20029 if x0.Op != OpAMD64MOVBloadidx1 { 20030 break 20031 } 20032 i0 := x0.AuxInt 20033 if x0.Aux != s { 20034 break 20035 } 20036 _ = x0.Args[2] 20037 if idx != x0.Args[0] { 20038 break 20039 } 20040 if p != x0.Args[1] { 20041 break 20042 } 20043 if mem != x0.Args[2] { 20044 break 20045 } 20046 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 20047 break 20048 } 20049 b = mergePoint(b, x0, x1) 20050 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 20051 v.reset(OpCopy) 20052 v.AddArg(v0) 20053 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 20054 v1.AuxInt = j0 20055 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 20056 v2.AuxInt = i0 20057 v2.Aux = s 20058 v2.AddArg(p) 20059 v2.AddArg(idx) 20060 v2.AddArg(mem) 20061 v1.AddArg(v2) 20062 v0.AddArg(v1) 20063 v0.AddArg(y) 20064 return true 20065 } 20066 // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 20067 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 20068 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) 20069 for { 20070 _ = v.Args[1] 20071 or := v.Args[0] 20072 if or.Op != OpAMD64ORL { 20073 break 20074 } 20075 _ = or.Args[1] 20076 s0 := or.Args[0] 20077 if s0.Op != OpAMD64SHLLconst { 20078 break 20079 } 20080 j0 := s0.AuxInt 20081 x0 := s0.Args[0] 20082 if x0.Op != OpAMD64MOVBloadidx1 { 20083 break 20084 } 20085 i0 := x0.AuxInt 20086 s := x0.Aux 20087 _ = x0.Args[2] 20088 p := x0.Args[0] 20089 idx := x0.Args[1] 20090 mem := x0.Args[2] 20091 y := or.Args[1] 20092 s1 := v.Args[1] 20093 if s1.Op != OpAMD64SHLLconst { 20094 break 20095 } 20096 j1 := s1.AuxInt 20097 x1 := s1.Args[0] 20098 if x1.Op != OpAMD64MOVBloadidx1 { 20099 break 20100 } 20101 i1 := x1.AuxInt 20102 if x1.Aux != s { 20103 break 20104 } 20105 _ = x1.Args[2] 20106 if p != 
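// NOTE (editorial sketch, not generated from gen/AMD64.rules): the ORL cases
// above and below all perform the same little-endian load combine; the large
// number of cases comes from ORL being commutative and MOVBloadidx1 accepting
// its two address operands in either order, so the generator emits one case
// per operand permutation. A rough Go-level sketch of code these rules fire
// on (b and i are hypothetical, bounds assumed in range):
//
//	w := uint32(b[i]) | uint32(b[i+1])<<8 // two MOVBloadidx1 merged into one MOVWloadidx1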
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
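// NOTE (editorial sketch, not generated from gen/AMD64.rules): from here the
// ORL cases also recognize big-endian byte assembly. Two adjacent byte loads
// combined high-byte-first become a 16-bit load plus ROLWconst [8], and two
// byte-swapped 16-bit halves become a 32-bit load plus BSWAPL. A rough
// Go-level sketch of the 16-bit case (b is hypothetical, bounds assumed in
// range):
//
//	w := uint16(b[0])<<8 | uint16(b[1]) // rewritten to MOVWload + ROLWconst [8]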
func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[1]
		p := x0.Args[0]
		mem := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[1]
		p := x1.Args[0]
		mem := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
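// NOTE (editorial sketch, not generated from gen/AMD64.rules):
// rewriteValueAMD64_OpAMD64ORL_100 continues the indexed combines. A
// MOVBloadidx1/MOVWloadidx1 address is p+idx at scale 1, so p and idx are
// interchangeable; each rule therefore appears once per (p, idx) order on
// each load, and the replacement always canonicalizes the merged load to
// (p, idx) argument order.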
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
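// NOTE (editorial sketch, not generated from gen/AMD64.rules): the remaining
// cases finish the BSWAPL/MOVLloadidx1 permutations. As a rough description
// of the shared machinery: each cond line requires Uses == 1 on every partial
// value so nothing else still needs the narrow loads, mergePoint(b, x0, x1)
// picks a block where a single merged load can validly stand in for both
// original loads (nil means no such block exists), and clobber(...) marks the
// superseded values dead so they are removed once the rewrite fires.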
_ = typ 21947 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 21948 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 21949 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 21950 for { 21951 _ = v.Args[1] 21952 sh := v.Args[0] 21953 if sh.Op != OpAMD64SHLLconst { 21954 break 21955 } 21956 if sh.AuxInt != 16 { 21957 break 21958 } 21959 r0 := sh.Args[0] 21960 if r0.Op != OpAMD64ROLWconst { 21961 break 21962 } 21963 if r0.AuxInt != 8 { 21964 break 21965 } 21966 x0 := r0.Args[0] 21967 if x0.Op != OpAMD64MOVWloadidx1 { 21968 break 21969 } 21970 i0 := x0.AuxInt 21971 s := x0.Aux 21972 _ = x0.Args[2] 21973 idx := x0.Args[0] 21974 p := x0.Args[1] 21975 mem := x0.Args[2] 21976 r1 := v.Args[1] 21977 if r1.Op != OpAMD64ROLWconst { 21978 break 21979 } 21980 if r1.AuxInt != 8 { 21981 break 21982 } 21983 x1 := r1.Args[0] 21984 if x1.Op != OpAMD64MOVWloadidx1 { 21985 break 21986 } 21987 i1 := x1.AuxInt 21988 if x1.Aux != s { 21989 break 21990 } 21991 _ = x1.Args[2] 21992 if p != x1.Args[0] { 21993 break 21994 } 21995 if idx != x1.Args[1] { 21996 break 21997 } 21998 if mem != x1.Args[2] { 21999 break 22000 } 22001 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 22002 break 22003 } 22004 b = mergePoint(b, x0, x1) 22005 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 22006 v.reset(OpCopy) 22007 v.AddArg(v0) 22008 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 22009 v1.AuxInt = i0 22010 v1.Aux = s 22011 v1.AddArg(p) 22012 v1.AddArg(idx) 22013 v1.AddArg(mem) 22014 v0.AddArg(v1) 22015 return true 22016 } 22017 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 22018 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 22019 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 22020 for { 22021 _ = v.Args[1] 22022 sh := v.Args[0] 22023 if sh.Op != OpAMD64SHLLconst { 22024 break 22025 } 22026 if sh.AuxInt != 16 { 22027 break 22028 } 22029 r0 := sh.Args[0] 22030 if r0.Op != OpAMD64ROLWconst { 22031 break 22032 } 22033 if r0.AuxInt != 8 { 22034 break 22035 } 22036 x0 := r0.Args[0] 22037 if x0.Op != OpAMD64MOVWloadidx1 { 22038 break 22039 } 22040 i0 := x0.AuxInt 22041 s := x0.Aux 22042 _ = x0.Args[2] 22043 p := x0.Args[0] 22044 idx := x0.Args[1] 22045 mem := x0.Args[2] 22046 r1 := v.Args[1] 22047 if r1.Op != OpAMD64ROLWconst { 22048 break 22049 } 22050 if r1.AuxInt != 8 { 22051 break 22052 } 22053 x1 := r1.Args[0] 22054 if x1.Op != OpAMD64MOVWloadidx1 { 22055 break 22056 } 22057 i1 := x1.AuxInt 22058 if x1.Aux != s { 22059 break 22060 } 22061 _ = x1.Args[2] 22062 if idx != x1.Args[0] { 22063 break 22064 } 22065 if p != x1.Args[1] { 22066 break 22067 } 22068 if mem != x1.Args[2] { 22069 break 22070 } 22071 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) 
&& clobber(sh)) { 22072 break 22073 } 22074 b = mergePoint(b, x0, x1) 22075 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 22076 v.reset(OpCopy) 22077 v.AddArg(v0) 22078 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 22079 v1.AuxInt = i0 22080 v1.Aux = s 22081 v1.AddArg(p) 22082 v1.AddArg(idx) 22083 v1.AddArg(mem) 22084 v0.AddArg(v1) 22085 return true 22086 } 22087 // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 22088 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 22089 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem)) 22090 for { 22091 _ = v.Args[1] 22092 sh := v.Args[0] 22093 if sh.Op != OpAMD64SHLLconst { 22094 break 22095 } 22096 if sh.AuxInt != 16 { 22097 break 22098 } 22099 r0 := sh.Args[0] 22100 if r0.Op != OpAMD64ROLWconst { 22101 break 22102 } 22103 if r0.AuxInt != 8 { 22104 break 22105 } 22106 x0 := r0.Args[0] 22107 if x0.Op != OpAMD64MOVWloadidx1 { 22108 break 22109 } 22110 i0 := x0.AuxInt 22111 s := x0.Aux 22112 _ = x0.Args[2] 22113 idx := x0.Args[0] 22114 p := x0.Args[1] 22115 mem := x0.Args[2] 22116 r1 := v.Args[1] 22117 if r1.Op != OpAMD64ROLWconst { 22118 break 22119 } 22120 if r1.AuxInt != 8 { 22121 break 22122 } 22123 x1 := r1.Args[0] 22124 if x1.Op != OpAMD64MOVWloadidx1 { 22125 break 22126 } 22127 i1 := x1.AuxInt 22128 if x1.Aux != s { 22129 break 22130 } 22131 _ = x1.Args[2] 22132 if idx != x1.Args[0] { 22133 break 22134 } 22135 if p != x1.Args[1] { 22136 break 22137 } 22138 if mem != x1.Args[2] { 22139 break 22140 } 22141 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 22142 break 22143 } 22144 b = mergePoint(b, x0, x1) 22145 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 22146 v.reset(OpCopy) 22147 v.AddArg(v0) 22148 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 22149 v1.AuxInt = i0 22150 v1.Aux = s 22151 v1.AddArg(p) 22152 v1.AddArg(idx) 22153 v1.AddArg(mem) 22154 v0.AddArg(v1) 22155 return true 22156 } 22157 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 22158 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22159 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22160 for { 22161 _ = v.Args[1] 22162 s0 := v.Args[0] 22163 if s0.Op != OpAMD64SHLLconst { 22164 break 22165 } 22166 j0 := s0.AuxInt 22167 x0 := s0.Args[0] 22168 if x0.Op != OpAMD64MOVBloadidx1 { 22169 break 22170 } 22171 i0 := x0.AuxInt 22172 s := x0.Aux 22173 _ = x0.Args[2] 22174 p := x0.Args[0] 22175 idx := x0.Args[1] 22176 mem := x0.Args[2] 22177 or := v.Args[1] 22178 if or.Op != OpAMD64ORL { 22179 break 22180 } 22181 _ = or.Args[1] 22182 s1 := or.Args[0] 22183 if s1.Op != OpAMD64SHLLconst { 22184 break 22185 } 22186 j1 := s1.AuxInt 22187 x1 := s1.Args[0] 22188 if x1.Op != OpAMD64MOVBloadidx1 { 22189 break 22190 } 22191 i1 := x1.AuxInt 22192 if x1.Aux != s { 22193 break 22194 
} 22195 _ = x1.Args[2] 22196 if p != x1.Args[0] { 22197 break 22198 } 22199 if idx != x1.Args[1] { 22200 break 22201 } 22202 if mem != x1.Args[2] { 22203 break 22204 } 22205 y := or.Args[1] 22206 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22207 break 22208 } 22209 b = mergePoint(b, x0, x1) 22210 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22211 v.reset(OpCopy) 22212 v.AddArg(v0) 22213 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22214 v1.AuxInt = j1 22215 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22216 v2.AuxInt = 8 22217 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22218 v3.AuxInt = i0 22219 v3.Aux = s 22220 v3.AddArg(p) 22221 v3.AddArg(idx) 22222 v3.AddArg(mem) 22223 v2.AddArg(v3) 22224 v1.AddArg(v2) 22225 v0.AddArg(v1) 22226 v0.AddArg(y) 22227 return true 22228 } 22229 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) 22230 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22231 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22232 for { 22233 _ = v.Args[1] 22234 s0 := v.Args[0] 22235 if s0.Op != OpAMD64SHLLconst { 22236 break 22237 } 22238 j0 := s0.AuxInt 22239 x0 := s0.Args[0] 22240 if x0.Op != OpAMD64MOVBloadidx1 { 22241 break 22242 } 22243 i0 := x0.AuxInt 22244 s := x0.Aux 22245 _ = x0.Args[2] 22246 idx := x0.Args[0] 22247 p := x0.Args[1] 22248 mem := x0.Args[2] 22249 or := v.Args[1] 22250 if or.Op != OpAMD64ORL { 22251 break 22252 } 22253 _ = or.Args[1] 22254 s1 := or.Args[0] 22255 if s1.Op != OpAMD64SHLLconst { 22256 break 22257 } 22258 j1 := s1.AuxInt 22259 x1 := s1.Args[0] 22260 if x1.Op != OpAMD64MOVBloadidx1 { 22261 break 22262 } 22263 i1 := x1.AuxInt 22264 if x1.Aux != s { 22265 break 22266 } 22267 _ = x1.Args[2] 22268 if p != x1.Args[0] { 22269 break 22270 } 22271 if idx != x1.Args[1] { 22272 break 22273 } 22274 if mem != x1.Args[2] { 22275 break 22276 } 22277 y := or.Args[1] 22278 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22279 break 22280 } 22281 b = mergePoint(b, x0, x1) 22282 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22283 v.reset(OpCopy) 22284 v.AddArg(v0) 22285 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22286 v1.AuxInt = j1 22287 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22288 v2.AuxInt = 8 22289 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22290 v3.AuxInt = i0 22291 v3.Aux = s 22292 v3.AddArg(p) 22293 v3.AddArg(idx) 22294 v3.AddArg(mem) 22295 v2.AddArg(v3) 22296 v1.AddArg(v2) 22297 v0.AddArg(v1) 22298 v0.AddArg(y) 22299 return true 22300 } 22301 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 22302 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22303 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22304 for { 22305 _ = v.Args[1] 22306 s0 := v.Args[0] 22307 if s0.Op != OpAMD64SHLLconst { 22308 break 22309 } 22310 j0 := s0.AuxInt 22311 x0 := s0.Args[0] 22312 if x0.Op != OpAMD64MOVBloadidx1 { 22313 break 22314 } 22315 i0 := x0.AuxInt 22316 s := x0.Aux 22317 _ = x0.Args[2] 22318 p := x0.Args[0] 22319 idx := x0.Args[1] 22320 mem := x0.Args[2] 22321 or := v.Args[1] 22322 if or.Op != OpAMD64ORL { 22323 break 22324 } 22325 _ = or.Args[1] 22326 s1 := or.Args[0] 22327 if s1.Op != OpAMD64SHLLconst { 22328 break 22329 } 22330 j1 := s1.AuxInt 22331 x1 := s1.Args[0] 22332 if x1.Op != OpAMD64MOVBloadidx1 { 22333 break 22334 } 22335 i1 := x1.AuxInt 22336 if x1.Aux != s { 22337 break 22338 } 22339 _ = x1.Args[2] 22340 if idx != x1.Args[0] { 22341 break 22342 } 22343 if p != x1.Args[1] { 22344 break 22345 } 22346 if mem != x1.Args[2] { 22347 break 22348 } 22349 y := or.Args[1] 22350 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22351 break 22352 } 22353 b = mergePoint(b, x0, x1) 22354 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22355 v.reset(OpCopy) 22356 v.AddArg(v0) 22357 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22358 v1.AuxInt = j1 22359 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22360 v2.AuxInt = 8 22361 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22362 v3.AuxInt = i0 22363 v3.Aux = s 22364 v3.AddArg(p) 22365 v3.AddArg(idx) 22366 v3.AddArg(mem) 22367 v2.AddArg(v3) 22368 v1.AddArg(v2) 22369 v0.AddArg(v1) 22370 v0.AddArg(y) 22371 return true 22372 } 22373 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) 22374 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22375 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22376 for { 22377 _ = v.Args[1] 22378 s0 := v.Args[0] 22379 if s0.Op != OpAMD64SHLLconst { 22380 break 22381 } 22382 j0 := s0.AuxInt 22383 x0 := s0.Args[0] 22384 if x0.Op != OpAMD64MOVBloadidx1 { 22385 break 22386 } 22387 i0 := x0.AuxInt 22388 s := x0.Aux 22389 _ = x0.Args[2] 22390 idx := x0.Args[0] 22391 p := x0.Args[1] 22392 mem := x0.Args[2] 22393 or := v.Args[1] 22394 if or.Op != OpAMD64ORL { 22395 break 22396 } 22397 _ = or.Args[1] 22398 s1 := or.Args[0] 22399 if s1.Op != OpAMD64SHLLconst { 22400 break 22401 } 22402 j1 := s1.AuxInt 22403 x1 := s1.Args[0] 22404 if x1.Op != OpAMD64MOVBloadidx1 { 22405 break 22406 } 22407 i1 := x1.AuxInt 22408 if x1.Aux != s { 22409 break 22410 } 22411 _ = x1.Args[2] 22412 if idx != x1.Args[0] { 22413 break 22414 } 22415 if p != x1.Args[1] { 22416 break 22417 } 22418 if mem != x1.Args[2] { 22419 break 22420 } 22421 y := or.Args[1] 22422 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22423 break 
22424 } 22425 b = mergePoint(b, x0, x1) 22426 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22427 v.reset(OpCopy) 22428 v.AddArg(v0) 22429 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22430 v1.AuxInt = j1 22431 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22432 v2.AuxInt = 8 22433 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22434 v3.AuxInt = i0 22435 v3.Aux = s 22436 v3.AddArg(p) 22437 v3.AddArg(idx) 22438 v3.AddArg(mem) 22439 v2.AddArg(v3) 22440 v1.AddArg(v2) 22441 v0.AddArg(v1) 22442 v0.AddArg(y) 22443 return true 22444 } 22445 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 22446 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22447 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22448 for { 22449 _ = v.Args[1] 22450 s0 := v.Args[0] 22451 if s0.Op != OpAMD64SHLLconst { 22452 break 22453 } 22454 j0 := s0.AuxInt 22455 x0 := s0.Args[0] 22456 if x0.Op != OpAMD64MOVBloadidx1 { 22457 break 22458 } 22459 i0 := x0.AuxInt 22460 s := x0.Aux 22461 _ = x0.Args[2] 22462 p := x0.Args[0] 22463 idx := x0.Args[1] 22464 mem := x0.Args[2] 22465 or := v.Args[1] 22466 if or.Op != OpAMD64ORL { 22467 break 22468 } 22469 _ = or.Args[1] 22470 y := or.Args[0] 22471 s1 := or.Args[1] 22472 if s1.Op != OpAMD64SHLLconst { 22473 break 22474 } 22475 j1 := s1.AuxInt 22476 x1 := s1.Args[0] 22477 if x1.Op != OpAMD64MOVBloadidx1 { 22478 break 22479 } 22480 i1 := x1.AuxInt 22481 if x1.Aux != s { 22482 break 22483 } 22484 _ = x1.Args[2] 22485 if p != x1.Args[0] { 22486 break 22487 } 22488 if idx != x1.Args[1] { 22489 break 22490 } 22491 if mem != x1.Args[2] { 22492 break 22493 } 22494 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22495 break 22496 } 22497 b = mergePoint(b, x0, x1) 22498 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22499 v.reset(OpCopy) 22500 v.AddArg(v0) 22501 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22502 v1.AuxInt = j1 22503 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22504 v2.AuxInt = 8 22505 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22506 v3.AuxInt = i0 22507 v3.Aux = s 22508 v3.AddArg(p) 22509 v3.AddArg(idx) 22510 v3.AddArg(mem) 22511 v2.AddArg(v3) 22512 v1.AddArg(v2) 22513 v0.AddArg(v1) 22514 v0.AddArg(y) 22515 return true 22516 } 22517 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) 22518 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22519 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22520 for { 22521 _ = v.Args[1] 22522 s0 := v.Args[0] 22523 if s0.Op != OpAMD64SHLLconst { 22524 break 22525 } 22526 j0 := s0.AuxInt 22527 x0 := s0.Args[0] 22528 if x0.Op != OpAMD64MOVBloadidx1 { 22529 break 22530 } 22531 i0 := x0.AuxInt 
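// The first byte load's offset (i0) and, just below, its symbol (s) are
// captured here; the guard later in this matcher fires only when the
// second MOVBloadidx1 reads the same {s} at offset i0+1, so two genuinely
// adjacent bytes can be refetched as one byte-swapped MOVWloadidx1 shifted
// into position by SHLLconst [j1].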
22532 s := x0.Aux 22533 _ = x0.Args[2] 22534 idx := x0.Args[0] 22535 p := x0.Args[1] 22536 mem := x0.Args[2] 22537 or := v.Args[1] 22538 if or.Op != OpAMD64ORL { 22539 break 22540 } 22541 _ = or.Args[1] 22542 y := or.Args[0] 22543 s1 := or.Args[1] 22544 if s1.Op != OpAMD64SHLLconst { 22545 break 22546 } 22547 j1 := s1.AuxInt 22548 x1 := s1.Args[0] 22549 if x1.Op != OpAMD64MOVBloadidx1 { 22550 break 22551 } 22552 i1 := x1.AuxInt 22553 if x1.Aux != s { 22554 break 22555 } 22556 _ = x1.Args[2] 22557 if p != x1.Args[0] { 22558 break 22559 } 22560 if idx != x1.Args[1] { 22561 break 22562 } 22563 if mem != x1.Args[2] { 22564 break 22565 } 22566 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22567 break 22568 } 22569 b = mergePoint(b, x0, x1) 22570 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22571 v.reset(OpCopy) 22572 v.AddArg(v0) 22573 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22574 v1.AuxInt = j1 22575 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22576 v2.AuxInt = 8 22577 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22578 v3.AuxInt = i0 22579 v3.Aux = s 22580 v3.AddArg(p) 22581 v3.AddArg(idx) 22582 v3.AddArg(mem) 22583 v2.AddArg(v3) 22584 v1.AddArg(v2) 22585 v0.AddArg(v1) 22586 v0.AddArg(y) 22587 return true 22588 } 22589 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 22590 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22591 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22592 for { 22593 _ = v.Args[1] 22594 s0 := v.Args[0] 22595 if s0.Op != OpAMD64SHLLconst { 22596 break 22597 } 22598 j0 := s0.AuxInt 22599 x0 := s0.Args[0] 22600 if x0.Op != OpAMD64MOVBloadidx1 { 22601 break 22602 } 22603 i0 := x0.AuxInt 22604 s := x0.Aux 22605 _ = x0.Args[2] 22606 p := x0.Args[0] 22607 idx := x0.Args[1] 22608 mem := x0.Args[2] 22609 or := v.Args[1] 22610 if or.Op != OpAMD64ORL { 22611 break 22612 } 22613 _ = or.Args[1] 22614 y := or.Args[0] 22615 s1 := or.Args[1] 22616 if s1.Op != OpAMD64SHLLconst { 22617 break 22618 } 22619 j1 := s1.AuxInt 22620 x1 := s1.Args[0] 22621 if x1.Op != OpAMD64MOVBloadidx1 { 22622 break 22623 } 22624 i1 := x1.AuxInt 22625 if x1.Aux != s { 22626 break 22627 } 22628 _ = x1.Args[2] 22629 if idx != x1.Args[0] { 22630 break 22631 } 22632 if p != x1.Args[1] { 22633 break 22634 } 22635 if mem != x1.Args[2] { 22636 break 22637 } 22638 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22639 break 22640 } 22641 b = mergePoint(b, x0, x1) 22642 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22643 v.reset(OpCopy) 22644 v.AddArg(v0) 22645 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22646 v1.AuxInt = j1 22647 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22648 v2.AuxInt = 8 22649 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22650 v3.AuxInt = i0 22651 v3.Aux = s 22652 v3.AddArg(p) 22653 v3.AddArg(idx) 22654 
v3.AddArg(mem) 22655 v2.AddArg(v3) 22656 v1.AddArg(v2) 22657 v0.AddArg(v1) 22658 v0.AddArg(y) 22659 return true 22660 } 22661 return false 22662 } 22663 func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool { 22664 b := v.Block 22665 _ = b 22666 typ := &b.Func.Config.Types 22667 _ = typ 22668 // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) 22669 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22670 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22671 for { 22672 _ = v.Args[1] 22673 s0 := v.Args[0] 22674 if s0.Op != OpAMD64SHLLconst { 22675 break 22676 } 22677 j0 := s0.AuxInt 22678 x0 := s0.Args[0] 22679 if x0.Op != OpAMD64MOVBloadidx1 { 22680 break 22681 } 22682 i0 := x0.AuxInt 22683 s := x0.Aux 22684 _ = x0.Args[2] 22685 idx := x0.Args[0] 22686 p := x0.Args[1] 22687 mem := x0.Args[2] 22688 or := v.Args[1] 22689 if or.Op != OpAMD64ORL { 22690 break 22691 } 22692 _ = or.Args[1] 22693 y := or.Args[0] 22694 s1 := or.Args[1] 22695 if s1.Op != OpAMD64SHLLconst { 22696 break 22697 } 22698 j1 := s1.AuxInt 22699 x1 := s1.Args[0] 22700 if x1.Op != OpAMD64MOVBloadidx1 { 22701 break 22702 } 22703 i1 := x1.AuxInt 22704 if x1.Aux != s { 22705 break 22706 } 22707 _ = x1.Args[2] 22708 if idx != x1.Args[0] { 22709 break 22710 } 22711 if p != x1.Args[1] { 22712 break 22713 } 22714 if mem != x1.Args[2] { 22715 break 22716 } 22717 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22718 break 22719 } 22720 b = mergePoint(b, x0, x1) 22721 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22722 v.reset(OpCopy) 22723 v.AddArg(v0) 22724 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22725 v1.AuxInt = j1 22726 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22727 v2.AuxInt = 8 22728 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22729 v3.AuxInt = i0 22730 v3.Aux = s 22731 v3.AddArg(p) 22732 v3.AddArg(idx) 22733 v3.AddArg(mem) 22734 v2.AddArg(v3) 22735 v1.AddArg(v2) 22736 v0.AddArg(v1) 22737 v0.AddArg(y) 22738 return true 22739 } 22740 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22741 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22742 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22743 for { 22744 _ = v.Args[1] 22745 or := v.Args[0] 22746 if or.Op != OpAMD64ORL { 22747 break 22748 } 22749 _ = or.Args[1] 22750 s1 := or.Args[0] 22751 if s1.Op != OpAMD64SHLLconst { 22752 break 22753 } 22754 j1 := s1.AuxInt 22755 x1 := s1.Args[0] 22756 if x1.Op != OpAMD64MOVBloadidx1 { 22757 break 22758 } 22759 i1 := x1.AuxInt 22760 s := x1.Aux 22761 _ = x1.Args[2] 22762 p := x1.Args[0] 22763 idx := x1.Args[1] 22764 mem := x1.Args[2] 22765 y := or.Args[1] 22766 s0 := v.Args[1] 22767 if s0.Op != 
OpAMD64SHLLconst { 22768 break 22769 } 22770 j0 := s0.AuxInt 22771 x0 := s0.Args[0] 22772 if x0.Op != OpAMD64MOVBloadidx1 { 22773 break 22774 } 22775 i0 := x0.AuxInt 22776 if x0.Aux != s { 22777 break 22778 } 22779 _ = x0.Args[2] 22780 if p != x0.Args[0] { 22781 break 22782 } 22783 if idx != x0.Args[1] { 22784 break 22785 } 22786 if mem != x0.Args[2] { 22787 break 22788 } 22789 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22790 break 22791 } 22792 b = mergePoint(b, x0, x1) 22793 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22794 v.reset(OpCopy) 22795 v.AddArg(v0) 22796 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22797 v1.AuxInt = j1 22798 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22799 v2.AuxInt = 8 22800 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22801 v3.AuxInt = i0 22802 v3.Aux = s 22803 v3.AddArg(p) 22804 v3.AddArg(idx) 22805 v3.AddArg(mem) 22806 v2.AddArg(v3) 22807 v1.AddArg(v2) 22808 v0.AddArg(v1) 22809 v0.AddArg(y) 22810 return true 22811 } 22812 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22813 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22814 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22815 for { 22816 _ = v.Args[1] 22817 or := v.Args[0] 22818 if or.Op != OpAMD64ORL { 22819 break 22820 } 22821 _ = or.Args[1] 22822 s1 := or.Args[0] 22823 if s1.Op != OpAMD64SHLLconst { 22824 break 22825 } 22826 j1 := s1.AuxInt 22827 x1 := s1.Args[0] 22828 if x1.Op != OpAMD64MOVBloadidx1 { 22829 break 22830 } 22831 i1 := x1.AuxInt 22832 s := x1.Aux 22833 _ = x1.Args[2] 22834 idx := x1.Args[0] 22835 p := x1.Args[1] 22836 mem := x1.Args[2] 22837 y := or.Args[1] 22838 s0 := v.Args[1] 22839 if s0.Op != OpAMD64SHLLconst { 22840 break 22841 } 22842 j0 := s0.AuxInt 22843 x0 := s0.Args[0] 22844 if x0.Op != OpAMD64MOVBloadidx1 { 22845 break 22846 } 22847 i0 := x0.AuxInt 22848 if x0.Aux != s { 22849 break 22850 } 22851 _ = x0.Args[2] 22852 if p != x0.Args[0] { 22853 break 22854 } 22855 if idx != x0.Args[1] { 22856 break 22857 } 22858 if mem != x0.Args[2] { 22859 break 22860 } 22861 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22862 break 22863 } 22864 b = mergePoint(b, x0, x1) 22865 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22866 v.reset(OpCopy) 22867 v.AddArg(v0) 22868 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22869 v1.AuxInt = j1 22870 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22871 v2.AuxInt = 8 22872 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22873 v3.AuxInt = i0 22874 v3.Aux = s 22875 v3.AddArg(p) 22876 v3.AddArg(idx) 22877 v3.AddArg(mem) 22878 v2.AddArg(v3) 22879 v1.AddArg(v2) 22880 v0.AddArg(v1) 22881 v0.AddArg(y) 22882 return true 22883 } 22884 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 
22885 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22886 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22887 for { 22888 _ = v.Args[1] 22889 or := v.Args[0] 22890 if or.Op != OpAMD64ORL { 22891 break 22892 } 22893 _ = or.Args[1] 22894 y := or.Args[0] 22895 s1 := or.Args[1] 22896 if s1.Op != OpAMD64SHLLconst { 22897 break 22898 } 22899 j1 := s1.AuxInt 22900 x1 := s1.Args[0] 22901 if x1.Op != OpAMD64MOVBloadidx1 { 22902 break 22903 } 22904 i1 := x1.AuxInt 22905 s := x1.Aux 22906 _ = x1.Args[2] 22907 p := x1.Args[0] 22908 idx := x1.Args[1] 22909 mem := x1.Args[2] 22910 s0 := v.Args[1] 22911 if s0.Op != OpAMD64SHLLconst { 22912 break 22913 } 22914 j0 := s0.AuxInt 22915 x0 := s0.Args[0] 22916 if x0.Op != OpAMD64MOVBloadidx1 { 22917 break 22918 } 22919 i0 := x0.AuxInt 22920 if x0.Aux != s { 22921 break 22922 } 22923 _ = x0.Args[2] 22924 if p != x0.Args[0] { 22925 break 22926 } 22927 if idx != x0.Args[1] { 22928 break 22929 } 22930 if mem != x0.Args[2] { 22931 break 22932 } 22933 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 22934 break 22935 } 22936 b = mergePoint(b, x0, x1) 22937 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 22938 v.reset(OpCopy) 22939 v.AddArg(v0) 22940 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 22941 v1.AuxInt = j1 22942 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 22943 v2.AuxInt = 8 22944 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 22945 v3.AuxInt = i0 22946 v3.Aux = s 22947 v3.AddArg(p) 22948 v3.AddArg(idx) 22949 v3.AddArg(mem) 22950 v2.AddArg(v3) 22951 v1.AddArg(v2) 22952 v0.AddArg(v1) 22953 v0.AddArg(y) 22954 return true 22955 } 22956 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 22957 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 22958 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 22959 for { 22960 _ = v.Args[1] 22961 or := v.Args[0] 22962 if or.Op != OpAMD64ORL { 22963 break 22964 } 22965 _ = or.Args[1] 22966 y := or.Args[0] 22967 s1 := or.Args[1] 22968 if s1.Op != OpAMD64SHLLconst { 22969 break 22970 } 22971 j1 := s1.AuxInt 22972 x1 := s1.Args[0] 22973 if x1.Op != OpAMD64MOVBloadidx1 { 22974 break 22975 } 22976 i1 := x1.AuxInt 22977 s := x1.Aux 22978 _ = x1.Args[2] 22979 idx := x1.Args[0] 22980 p := x1.Args[1] 22981 mem := x1.Args[2] 22982 s0 := v.Args[1] 22983 if s0.Op != OpAMD64SHLLconst { 22984 break 22985 } 22986 j0 := s0.AuxInt 22987 x0 := s0.Args[0] 22988 if x0.Op != OpAMD64MOVBloadidx1 { 22989 break 22990 } 22991 i0 := x0.AuxInt 22992 if x0.Aux != s { 22993 break 22994 } 22995 _ = x0.Args[2] 22996 if p != x0.Args[0] { 22997 break 22998 } 22999 if idx != x0.Args[1] { 23000 break 23001 } 23002 if mem != x0.Args[2] { 23003 break 23004 } 23005 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23006 break 23007 } 23008 b = mergePoint(b, x0, x1) 23009 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23010 v.reset(OpCopy) 23011 v.AddArg(v0) 23012 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23013 v1.AuxInt = j1 23014 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23015 v2.AuxInt = 8 23016 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23017 v3.AuxInt = i0 23018 v3.Aux = s 23019 v3.AddArg(p) 23020 v3.AddArg(idx) 23021 v3.AddArg(mem) 23022 v2.AddArg(v3) 23023 v1.AddArg(v2) 23024 v0.AddArg(v1) 23025 v0.AddArg(y) 23026 return true 23027 } 23028 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23029 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23030 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23031 for { 23032 _ = v.Args[1] 23033 or := v.Args[0] 23034 if or.Op != OpAMD64ORL { 23035 break 23036 } 23037 _ = or.Args[1] 23038 s1 := or.Args[0] 23039 if s1.Op != OpAMD64SHLLconst { 23040 break 23041 } 23042 j1 := s1.AuxInt 23043 x1 := s1.Args[0] 23044 if x1.Op != OpAMD64MOVBloadidx1 { 23045 break 23046 } 23047 i1 := x1.AuxInt 23048 s := x1.Aux 23049 _ = x1.Args[2] 23050 p := x1.Args[0] 23051 idx := x1.Args[1] 23052 mem := x1.Args[2] 23053 y := or.Args[1] 23054 s0 := v.Args[1] 23055 if s0.Op != OpAMD64SHLLconst { 23056 break 23057 } 23058 j0 := s0.AuxInt 23059 x0 := s0.Args[0] 23060 if x0.Op != OpAMD64MOVBloadidx1 { 23061 break 23062 } 23063 i0 := x0.AuxInt 23064 if x0.Aux != s { 23065 break 23066 } 23067 _ = x0.Args[2] 23068 if idx != x0.Args[0] { 23069 break 23070 } 23071 if p != x0.Args[1] { 23072 break 23073 } 23074 if mem != x0.Args[2] { 23075 break 23076 } 23077 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23078 break 23079 } 23080 b = mergePoint(b, x0, x1) 23081 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23082 v.reset(OpCopy) 23083 v.AddArg(v0) 23084 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23085 v1.AuxInt = j1 23086 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23087 v2.AuxInt = 8 23088 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23089 v3.AuxInt = i0 23090 v3.Aux = s 23091 v3.AddArg(p) 23092 v3.AddArg(idx) 23093 v3.AddArg(mem) 23094 v2.AddArg(v3) 23095 v1.AddArg(v2) 23096 v0.AddArg(v1) 23097 v0.AddArg(y) 23098 return true 23099 } 23100 // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23101 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23102 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23103 for { 23104 _ = v.Args[1] 23105 or := v.Args[0] 
23106 if or.Op != OpAMD64ORL { 23107 break 23108 } 23109 _ = or.Args[1] 23110 s1 := or.Args[0] 23111 if s1.Op != OpAMD64SHLLconst { 23112 break 23113 } 23114 j1 := s1.AuxInt 23115 x1 := s1.Args[0] 23116 if x1.Op != OpAMD64MOVBloadidx1 { 23117 break 23118 } 23119 i1 := x1.AuxInt 23120 s := x1.Aux 23121 _ = x1.Args[2] 23122 idx := x1.Args[0] 23123 p := x1.Args[1] 23124 mem := x1.Args[2] 23125 y := or.Args[1] 23126 s0 := v.Args[1] 23127 if s0.Op != OpAMD64SHLLconst { 23128 break 23129 } 23130 j0 := s0.AuxInt 23131 x0 := s0.Args[0] 23132 if x0.Op != OpAMD64MOVBloadidx1 { 23133 break 23134 } 23135 i0 := x0.AuxInt 23136 if x0.Aux != s { 23137 break 23138 } 23139 _ = x0.Args[2] 23140 if idx != x0.Args[0] { 23141 break 23142 } 23143 if p != x0.Args[1] { 23144 break 23145 } 23146 if mem != x0.Args[2] { 23147 break 23148 } 23149 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23150 break 23151 } 23152 b = mergePoint(b, x0, x1) 23153 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23154 v.reset(OpCopy) 23155 v.AddArg(v0) 23156 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23157 v1.AuxInt = j1 23158 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23159 v2.AuxInt = 8 23160 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23161 v3.AuxInt = i0 23162 v3.Aux = s 23163 v3.AddArg(p) 23164 v3.AddArg(idx) 23165 v3.AddArg(mem) 23166 v2.AddArg(v3) 23167 v1.AddArg(v2) 23168 v0.AddArg(v1) 23169 v0.AddArg(y) 23170 return true 23171 } 23172 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23173 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23174 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23175 for { 23176 _ = v.Args[1] 23177 or := v.Args[0] 23178 if or.Op != OpAMD64ORL { 23179 break 23180 } 23181 _ = or.Args[1] 23182 y := or.Args[0] 23183 s1 := or.Args[1] 23184 if s1.Op != OpAMD64SHLLconst { 23185 break 23186 } 23187 j1 := s1.AuxInt 23188 x1 := s1.Args[0] 23189 if x1.Op != OpAMD64MOVBloadidx1 { 23190 break 23191 } 23192 i1 := x1.AuxInt 23193 s := x1.Aux 23194 _ = x1.Args[2] 23195 p := x1.Args[0] 23196 idx := x1.Args[1] 23197 mem := x1.Args[2] 23198 s0 := v.Args[1] 23199 if s0.Op != OpAMD64SHLLconst { 23200 break 23201 } 23202 j0 := s0.AuxInt 23203 x0 := s0.Args[0] 23204 if x0.Op != OpAMD64MOVBloadidx1 { 23205 break 23206 } 23207 i0 := x0.AuxInt 23208 if x0.Aux != s { 23209 break 23210 } 23211 _ = x0.Args[2] 23212 if idx != x0.Args[0] { 23213 break 23214 } 23215 if p != x0.Args[1] { 23216 break 23217 } 23218 if mem != x0.Args[2] { 23219 break 23220 } 23221 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23222 break 23223 } 23224 b = mergePoint(b, x0, x1) 23225 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23226 v.reset(OpCopy) 23227 v.AddArg(v0) 23228 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23229 v1.AuxInt = j1 23230 v2 := b.NewValue0(v.Pos, 
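// Commuted variant: the inner ORL is now the first operand of the outer
// ORL rather than the second. ORL is commutative, and the rule generator
// emits one matcher per operand order instead of normalizing operands at
// run time, which is why these near-identical bodies repeat.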
OpAMD64ROLWconst, typ.UInt16) 23231 v2.AuxInt = 8 23232 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23233 v3.AuxInt = i0 23234 v3.Aux = s 23235 v3.AddArg(p) 23236 v3.AddArg(idx) 23237 v3.AddArg(mem) 23238 v2.AddArg(v3) 23239 v1.AddArg(v2) 23240 v0.AddArg(v1) 23241 v0.AddArg(y) 23242 return true 23243 } 23244 // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 23245 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 23246 // result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) 23247 for { 23248 _ = v.Args[1] 23249 or := v.Args[0] 23250 if or.Op != OpAMD64ORL { 23251 break 23252 } 23253 _ = or.Args[1] 23254 y := or.Args[0] 23255 s1 := or.Args[1] 23256 if s1.Op != OpAMD64SHLLconst { 23257 break 23258 } 23259 j1 := s1.AuxInt 23260 x1 := s1.Args[0] 23261 if x1.Op != OpAMD64MOVBloadidx1 { 23262 break 23263 } 23264 i1 := x1.AuxInt 23265 s := x1.Aux 23266 _ = x1.Args[2] 23267 idx := x1.Args[0] 23268 p := x1.Args[1] 23269 mem := x1.Args[2] 23270 s0 := v.Args[1] 23271 if s0.Op != OpAMD64SHLLconst { 23272 break 23273 } 23274 j0 := s0.AuxInt 23275 x0 := s0.Args[0] 23276 if x0.Op != OpAMD64MOVBloadidx1 { 23277 break 23278 } 23279 i0 := x0.AuxInt 23280 if x0.Aux != s { 23281 break 23282 } 23283 _ = x0.Args[2] 23284 if idx != x0.Args[0] { 23285 break 23286 } 23287 if p != x0.Args[1] { 23288 break 23289 } 23290 if mem != x0.Args[2] { 23291 break 23292 } 23293 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 23294 break 23295 } 23296 b = mergePoint(b, x0, x1) 23297 v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) 23298 v.reset(OpCopy) 23299 v.AddArg(v0) 23300 v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) 23301 v1.AuxInt = j1 23302 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 23303 v2.AuxInt = 8 23304 v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 23305 v3.AuxInt = i0 23306 v3.Aux = s 23307 v3.AddArg(p) 23308 v3.AddArg(idx) 23309 v3.AddArg(mem) 23310 v2.AddArg(v3) 23311 v1.AddArg(v2) 23312 v0.AddArg(v1) 23313 v0.AddArg(y) 23314 return true 23315 } 23316 // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) 23317 // cond: canMergeLoad(v, l, x) && clobber(l) 23318 // result: (ORLmem x [off] {sym} ptr mem) 23319 for { 23320 _ = v.Args[1] 23321 x := v.Args[0] 23322 l := v.Args[1] 23323 if l.Op != OpAMD64MOVLload { 23324 break 23325 } 23326 off := l.AuxInt 23327 sym := l.Aux 23328 _ = l.Args[1] 23329 ptr := l.Args[0] 23330 mem := l.Args[1] 23331 if !(canMergeLoad(v, l, x) && clobber(l)) { 23332 break 23333 } 23334 v.reset(OpAMD64ORLmem) 23335 v.AuxInt = off 23336 v.Aux = sym 23337 v.AddArg(x) 23338 v.AddArg(ptr) 23339 v.AddArg(mem) 23340 return true 23341 } 23342 return false 23343 } 23344 func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { 23345 // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) 23346 // cond: canMergeLoad(v, l, x) && clobber(l) 23347 // result: (ORLmem x [off] {sym} ptr mem) 23348 for { 23349 _ = v.Args[1] 23350 l := v.Args[0] 23351 if l.Op != OpAMD64MOVLload { 23352 break 23353 } 23354 off := l.AuxInt 23355 sym := l.Aux 23356 _ 
= l.Args[1] 23357 ptr := l.Args[0] 23358 mem := l.Args[1] 23359 x := v.Args[1] 23360 if !(canMergeLoad(v, l, x) && clobber(l)) { 23361 break 23362 } 23363 v.reset(OpAMD64ORLmem) 23364 v.AuxInt = off 23365 v.Aux = sym 23366 v.AddArg(x) 23367 v.AddArg(ptr) 23368 v.AddArg(mem) 23369 return true 23370 } 23371 return false 23372 } 23373 func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { 23374 // match: (ORLconst [c] x) 23375 // cond: int32(c)==0 23376 // result: x 23377 for { 23378 c := v.AuxInt 23379 x := v.Args[0] 23380 if !(int32(c) == 0) { 23381 break 23382 } 23383 v.reset(OpCopy) 23384 v.Type = x.Type 23385 v.AddArg(x) 23386 return true 23387 } 23388 // match: (ORLconst [c] _) 23389 // cond: int32(c)==-1 23390 // result: (MOVLconst [-1]) 23391 for { 23392 c := v.AuxInt 23393 if !(int32(c) == -1) { 23394 break 23395 } 23396 v.reset(OpAMD64MOVLconst) 23397 v.AuxInt = -1 23398 return true 23399 } 23400 // match: (ORLconst [c] (MOVLconst [d])) 23401 // cond: 23402 // result: (MOVLconst [c|d]) 23403 for { 23404 c := v.AuxInt 23405 v_0 := v.Args[0] 23406 if v_0.Op != OpAMD64MOVLconst { 23407 break 23408 } 23409 d := v_0.AuxInt 23410 v.reset(OpAMD64MOVLconst) 23411 v.AuxInt = c | d 23412 return true 23413 } 23414 return false 23415 } 23416 func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool { 23417 b := v.Block 23418 _ = b 23419 typ := &b.Func.Config.Types 23420 _ = typ 23421 // match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 23422 // cond: 23423 // result: ( ORL x (MOVLf2i y)) 23424 for { 23425 off := v.AuxInt 23426 sym := v.Aux 23427 _ = v.Args[2] 23428 x := v.Args[0] 23429 ptr := v.Args[1] 23430 v_2 := v.Args[2] 23431 if v_2.Op != OpAMD64MOVSSstore { 23432 break 23433 } 23434 if v_2.AuxInt != off { 23435 break 23436 } 23437 if v_2.Aux != sym { 23438 break 23439 } 23440 _ = v_2.Args[2] 23441 if ptr != v_2.Args[0] { 23442 break 23443 } 23444 y := v_2.Args[1] 23445 v.reset(OpAMD64ORL) 23446 v.AddArg(x) 23447 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 23448 v0.AddArg(y) 23449 v.AddArg(v0) 23450 return true 23451 } 23452 return false 23453 } 23454 func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { 23455 // match: (ORQ x (MOVQconst [c])) 23456 // cond: is32Bit(c) 23457 // result: (ORQconst [c] x) 23458 for { 23459 _ = v.Args[1] 23460 x := v.Args[0] 23461 v_1 := v.Args[1] 23462 if v_1.Op != OpAMD64MOVQconst { 23463 break 23464 } 23465 c := v_1.AuxInt 23466 if !(is32Bit(c)) { 23467 break 23468 } 23469 v.reset(OpAMD64ORQconst) 23470 v.AuxInt = c 23471 v.AddArg(x) 23472 return true 23473 } 23474 // match: (ORQ (MOVQconst [c]) x) 23475 // cond: is32Bit(c) 23476 // result: (ORQconst [c] x) 23477 for { 23478 _ = v.Args[1] 23479 v_0 := v.Args[0] 23480 if v_0.Op != OpAMD64MOVQconst { 23481 break 23482 } 23483 c := v_0.AuxInt 23484 x := v.Args[1] 23485 if !(is32Bit(c)) { 23486 break 23487 } 23488 v.reset(OpAMD64ORQconst) 23489 v.AuxInt = c 23490 v.AddArg(x) 23491 return true 23492 } 23493 // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) 23494 // cond: d==64-c 23495 // result: (ROLQconst x [c]) 23496 for { 23497 _ = v.Args[1] 23498 v_0 := v.Args[0] 23499 if v_0.Op != OpAMD64SHLQconst { 23500 break 23501 } 23502 c := v_0.AuxInt 23503 x := v_0.Args[0] 23504 v_1 := v.Args[1] 23505 if v_1.Op != OpAMD64SHRQconst { 23506 break 23507 } 23508 d := v_1.AuxInt 23509 if x != v_1.Args[0] { 23510 break 23511 } 23512 if !(d == 64-c) { 23513 break 23514 } 23515 v.reset(OpAMD64ROLQconst) 23516 v.AuxInt = c 23517 v.AddArg(x) 23518 return true 23519 } 23520 // match: (ORQ (SHRQconst x 
[d]) (SHLQconst x [c])) 23521 // cond: d==64-c 23522 // result: (ROLQconst x [c]) 23523 for { 23524 _ = v.Args[1] 23525 v_0 := v.Args[0] 23526 if v_0.Op != OpAMD64SHRQconst { 23527 break 23528 } 23529 d := v_0.AuxInt 23530 x := v_0.Args[0] 23531 v_1 := v.Args[1] 23532 if v_1.Op != OpAMD64SHLQconst { 23533 break 23534 } 23535 c := v_1.AuxInt 23536 if x != v_1.Args[0] { 23537 break 23538 } 23539 if !(d == 64-c) { 23540 break 23541 } 23542 v.reset(OpAMD64ROLQconst) 23543 v.AuxInt = c 23544 v.AddArg(x) 23545 return true 23546 } 23547 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) 23548 // cond: 23549 // result: (ROLQ x y) 23550 for { 23551 _ = v.Args[1] 23552 v_0 := v.Args[0] 23553 if v_0.Op != OpAMD64SHLQ { 23554 break 23555 } 23556 _ = v_0.Args[1] 23557 x := v_0.Args[0] 23558 y := v_0.Args[1] 23559 v_1 := v.Args[1] 23560 if v_1.Op != OpAMD64ANDQ { 23561 break 23562 } 23563 _ = v_1.Args[1] 23564 v_1_0 := v_1.Args[0] 23565 if v_1_0.Op != OpAMD64SHRQ { 23566 break 23567 } 23568 _ = v_1_0.Args[1] 23569 if x != v_1_0.Args[0] { 23570 break 23571 } 23572 v_1_0_1 := v_1_0.Args[1] 23573 if v_1_0_1.Op != OpAMD64NEGQ { 23574 break 23575 } 23576 if y != v_1_0_1.Args[0] { 23577 break 23578 } 23579 v_1_1 := v_1.Args[1] 23580 if v_1_1.Op != OpAMD64SBBQcarrymask { 23581 break 23582 } 23583 v_1_1_0 := v_1_1.Args[0] 23584 if v_1_1_0.Op != OpAMD64CMPQconst { 23585 break 23586 } 23587 if v_1_1_0.AuxInt != 64 { 23588 break 23589 } 23590 v_1_1_0_0 := v_1_1_0.Args[0] 23591 if v_1_1_0_0.Op != OpAMD64NEGQ { 23592 break 23593 } 23594 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 23595 if v_1_1_0_0_0.Op != OpAMD64ADDQconst { 23596 break 23597 } 23598 if v_1_1_0_0_0.AuxInt != -64 { 23599 break 23600 } 23601 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 23602 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 23603 break 23604 } 23605 if v_1_1_0_0_0_0.AuxInt != 63 { 23606 break 23607 } 23608 if y != v_1_1_0_0_0_0.Args[0] { 23609 break 23610 } 23611 v.reset(OpAMD64ROLQ) 23612 v.AddArg(x) 23613 v.AddArg(y) 23614 return true 23615 } 23616 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y)))) 23617 // cond: 23618 // result: (ROLQ x y) 23619 for { 23620 _ = v.Args[1] 23621 v_0 := v.Args[0] 23622 if v_0.Op != OpAMD64SHLQ { 23623 break 23624 } 23625 _ = v_0.Args[1] 23626 x := v_0.Args[0] 23627 y := v_0.Args[1] 23628 v_1 := v.Args[1] 23629 if v_1.Op != OpAMD64ANDQ { 23630 break 23631 } 23632 _ = v_1.Args[1] 23633 v_1_0 := v_1.Args[0] 23634 if v_1_0.Op != OpAMD64SBBQcarrymask { 23635 break 23636 } 23637 v_1_0_0 := v_1_0.Args[0] 23638 if v_1_0_0.Op != OpAMD64CMPQconst { 23639 break 23640 } 23641 if v_1_0_0.AuxInt != 64 { 23642 break 23643 } 23644 v_1_0_0_0 := v_1_0_0.Args[0] 23645 if v_1_0_0_0.Op != OpAMD64NEGQ { 23646 break 23647 } 23648 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 23649 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 23650 break 23651 } 23652 if v_1_0_0_0_0.AuxInt != -64 { 23653 break 23654 } 23655 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 23656 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 23657 break 23658 } 23659 if v_1_0_0_0_0_0.AuxInt != 63 { 23660 break 23661 } 23662 if y != v_1_0_0_0_0_0.Args[0] { 23663 break 23664 } 23665 v_1_1 := v_1.Args[1] 23666 if v_1_1.Op != OpAMD64SHRQ { 23667 break 23668 } 23669 _ = v_1_1.Args[1] 23670 if x != v_1_1.Args[0] { 23671 break 23672 } 23673 v_1_1_1 := v_1_1.Args[1] 23674 if v_1_1_1.Op != OpAMD64NEGQ { 23675 break 23676 } 23677 if y != v_1_1_1.Args[0] { 23678 break 23679 } 
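// Every piece of the shift/mask idiom matched against the same x and y,
// so the whole tree collapses into a single variable-count ROLQ. Such
// trees typically come from rotate expressions like the hypothetical
// helper below (not a name from this file):
//
//	func rotl64(x uint64, s uint) uint64 { return x<<(s&63) | x>>((64-s)&63) }
//
// where the generic rules introduce the SBBQcarrymask guard to keep the
// oversized-shift case well defined.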
23680 v.reset(OpAMD64ROLQ) 23681 v.AddArg(x) 23682 v.AddArg(y) 23683 return true 23684 } 23685 // match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y)) 23686 // cond: 23687 // result: (ROLQ x y) 23688 for { 23689 _ = v.Args[1] 23690 v_0 := v.Args[0] 23691 if v_0.Op != OpAMD64ANDQ { 23692 break 23693 } 23694 _ = v_0.Args[1] 23695 v_0_0 := v_0.Args[0] 23696 if v_0_0.Op != OpAMD64SHRQ { 23697 break 23698 } 23699 _ = v_0_0.Args[1] 23700 x := v_0_0.Args[0] 23701 v_0_0_1 := v_0_0.Args[1] 23702 if v_0_0_1.Op != OpAMD64NEGQ { 23703 break 23704 } 23705 y := v_0_0_1.Args[0] 23706 v_0_1 := v_0.Args[1] 23707 if v_0_1.Op != OpAMD64SBBQcarrymask { 23708 break 23709 } 23710 v_0_1_0 := v_0_1.Args[0] 23711 if v_0_1_0.Op != OpAMD64CMPQconst { 23712 break 23713 } 23714 if v_0_1_0.AuxInt != 64 { 23715 break 23716 } 23717 v_0_1_0_0 := v_0_1_0.Args[0] 23718 if v_0_1_0_0.Op != OpAMD64NEGQ { 23719 break 23720 } 23721 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 23722 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 23723 break 23724 } 23725 if v_0_1_0_0_0.AuxInt != -64 { 23726 break 23727 } 23728 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 23729 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 23730 break 23731 } 23732 if v_0_1_0_0_0_0.AuxInt != 63 { 23733 break 23734 } 23735 if y != v_0_1_0_0_0_0.Args[0] { 23736 break 23737 } 23738 v_1 := v.Args[1] 23739 if v_1.Op != OpAMD64SHLQ { 23740 break 23741 } 23742 _ = v_1.Args[1] 23743 if x != v_1.Args[0] { 23744 break 23745 } 23746 if y != v_1.Args[1] { 23747 break 23748 } 23749 v.reset(OpAMD64ROLQ) 23750 v.AddArg(x) 23751 v.AddArg(y) 23752 return true 23753 } 23754 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y)) 23755 // cond: 23756 // result: (ROLQ x y) 23757 for { 23758 _ = v.Args[1] 23759 v_0 := v.Args[0] 23760 if v_0.Op != OpAMD64ANDQ { 23761 break 23762 } 23763 _ = v_0.Args[1] 23764 v_0_0 := v_0.Args[0] 23765 if v_0_0.Op != OpAMD64SBBQcarrymask { 23766 break 23767 } 23768 v_0_0_0 := v_0_0.Args[0] 23769 if v_0_0_0.Op != OpAMD64CMPQconst { 23770 break 23771 } 23772 if v_0_0_0.AuxInt != 64 { 23773 break 23774 } 23775 v_0_0_0_0 := v_0_0_0.Args[0] 23776 if v_0_0_0_0.Op != OpAMD64NEGQ { 23777 break 23778 } 23779 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 23780 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 23781 break 23782 } 23783 if v_0_0_0_0_0.AuxInt != -64 { 23784 break 23785 } 23786 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 23787 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 23788 break 23789 } 23790 if v_0_0_0_0_0_0.AuxInt != 63 { 23791 break 23792 } 23793 y := v_0_0_0_0_0_0.Args[0] 23794 v_0_1 := v_0.Args[1] 23795 if v_0_1.Op != OpAMD64SHRQ { 23796 break 23797 } 23798 _ = v_0_1.Args[1] 23799 x := v_0_1.Args[0] 23800 v_0_1_1 := v_0_1.Args[1] 23801 if v_0_1_1.Op != OpAMD64NEGQ { 23802 break 23803 } 23804 if y != v_0_1_1.Args[0] { 23805 break 23806 } 23807 v_1 := v.Args[1] 23808 if v_1.Op != OpAMD64SHLQ { 23809 break 23810 } 23811 _ = v_1.Args[1] 23812 if x != v_1.Args[0] { 23813 break 23814 } 23815 if y != v_1.Args[1] { 23816 break 23817 } 23818 v.reset(OpAMD64ROLQ) 23819 v.AddArg(x) 23820 v.AddArg(y) 23821 return true 23822 } 23823 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 23824 // cond: 23825 // result: (ROLQ x y) 23826 for { 23827 _ = v.Args[1] 23828 v_0 := v.Args[0] 23829 if v_0.Op != OpAMD64SHLQ { 23830 break 23831 } 23832 _ = v_0.Args[1] 23833 x := v_0.Args[0] 23834 y := v_0.Args[1] 
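// x and y have been captured from the SHLQ; the remainder of this matcher
// checks that the AND operand rebuilds the same rotate mask from the same
// y, here with 32-bit count arithmetic (NEGL, ADDLconst, ANDLconst,
// CMPLconst) for rotate counts computed as 32-bit values.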
23835 v_1 := v.Args[1] 23836 if v_1.Op != OpAMD64ANDQ { 23837 break 23838 } 23839 _ = v_1.Args[1] 23840 v_1_0 := v_1.Args[0] 23841 if v_1_0.Op != OpAMD64SHRQ { 23842 break 23843 } 23844 _ = v_1_0.Args[1] 23845 if x != v_1_0.Args[0] { 23846 break 23847 } 23848 v_1_0_1 := v_1_0.Args[1] 23849 if v_1_0_1.Op != OpAMD64NEGL { 23850 break 23851 } 23852 if y != v_1_0_1.Args[0] { 23853 break 23854 } 23855 v_1_1 := v_1.Args[1] 23856 if v_1_1.Op != OpAMD64SBBQcarrymask { 23857 break 23858 } 23859 v_1_1_0 := v_1_1.Args[0] 23860 if v_1_1_0.Op != OpAMD64CMPLconst { 23861 break 23862 } 23863 if v_1_1_0.AuxInt != 64 { 23864 break 23865 } 23866 v_1_1_0_0 := v_1_1_0.Args[0] 23867 if v_1_1_0_0.Op != OpAMD64NEGL { 23868 break 23869 } 23870 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 23871 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 23872 break 23873 } 23874 if v_1_1_0_0_0.AuxInt != -64 { 23875 break 23876 } 23877 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 23878 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 23879 break 23880 } 23881 if v_1_1_0_0_0_0.AuxInt != 63 { 23882 break 23883 } 23884 if y != v_1_1_0_0_0_0.Args[0] { 23885 break 23886 } 23887 v.reset(OpAMD64ROLQ) 23888 v.AddArg(x) 23889 v.AddArg(y) 23890 return true 23891 } 23892 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y)))) 23893 // cond: 23894 // result: (ROLQ x y) 23895 for { 23896 _ = v.Args[1] 23897 v_0 := v.Args[0] 23898 if v_0.Op != OpAMD64SHLQ { 23899 break 23900 } 23901 _ = v_0.Args[1] 23902 x := v_0.Args[0] 23903 y := v_0.Args[1] 23904 v_1 := v.Args[1] 23905 if v_1.Op != OpAMD64ANDQ { 23906 break 23907 } 23908 _ = v_1.Args[1] 23909 v_1_0 := v_1.Args[0] 23910 if v_1_0.Op != OpAMD64SBBQcarrymask { 23911 break 23912 } 23913 v_1_0_0 := v_1_0.Args[0] 23914 if v_1_0_0.Op != OpAMD64CMPLconst { 23915 break 23916 } 23917 if v_1_0_0.AuxInt != 64 { 23918 break 23919 } 23920 v_1_0_0_0 := v_1_0_0.Args[0] 23921 if v_1_0_0_0.Op != OpAMD64NEGL { 23922 break 23923 } 23924 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 23925 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 23926 break 23927 } 23928 if v_1_0_0_0_0.AuxInt != -64 { 23929 break 23930 } 23931 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 23932 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 23933 break 23934 } 23935 if v_1_0_0_0_0_0.AuxInt != 63 { 23936 break 23937 } 23938 if y != v_1_0_0_0_0_0.Args[0] { 23939 break 23940 } 23941 v_1_1 := v_1.Args[1] 23942 if v_1_1.Op != OpAMD64SHRQ { 23943 break 23944 } 23945 _ = v_1_1.Args[1] 23946 if x != v_1_1.Args[0] { 23947 break 23948 } 23949 v_1_1_1 := v_1_1.Args[1] 23950 if v_1_1_1.Op != OpAMD64NEGL { 23951 break 23952 } 23953 if y != v_1_1_1.Args[0] { 23954 break 23955 } 23956 v.reset(OpAMD64ROLQ) 23957 v.AddArg(x) 23958 v.AddArg(y) 23959 return true 23960 } 23961 return false 23962 } 23963 func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool { 23964 // match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y)) 23965 // cond: 23966 // result: (ROLQ x y) 23967 for { 23968 _ = v.Args[1] 23969 v_0 := v.Args[0] 23970 if v_0.Op != OpAMD64ANDQ { 23971 break 23972 } 23973 _ = v_0.Args[1] 23974 v_0_0 := v_0.Args[0] 23975 if v_0_0.Op != OpAMD64SHRQ { 23976 break 23977 } 23978 _ = v_0_0.Args[1] 23979 x := v_0_0.Args[0] 23980 v_0_0_1 := v_0_0.Args[1] 23981 if v_0_0_1.Op != OpAMD64NEGL { 23982 break 23983 } 23984 y := v_0_0_1.Args[0] 23985 v_0_1 := v_0.Args[1] 23986 if v_0_1.Op != OpAMD64SBBQcarrymask { 23987 break 23988 } 23989 v_0_1_0 := v_0_1.Args[0] 23990 if v_0_1_0.Op != 
OpAMD64CMPLconst { 23991 break 23992 } 23993 if v_0_1_0.AuxInt != 64 { 23994 break 23995 } 23996 v_0_1_0_0 := v_0_1_0.Args[0] 23997 if v_0_1_0_0.Op != OpAMD64NEGL { 23998 break 23999 } 24000 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 24001 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 24002 break 24003 } 24004 if v_0_1_0_0_0.AuxInt != -64 { 24005 break 24006 } 24007 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24008 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 24009 break 24010 } 24011 if v_0_1_0_0_0_0.AuxInt != 63 { 24012 break 24013 } 24014 if y != v_0_1_0_0_0_0.Args[0] { 24015 break 24016 } 24017 v_1 := v.Args[1] 24018 if v_1.Op != OpAMD64SHLQ { 24019 break 24020 } 24021 _ = v_1.Args[1] 24022 if x != v_1.Args[0] { 24023 break 24024 } 24025 if y != v_1.Args[1] { 24026 break 24027 } 24028 v.reset(OpAMD64ROLQ) 24029 v.AddArg(x) 24030 v.AddArg(y) 24031 return true 24032 } 24033 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y)) 24034 // cond: 24035 // result: (ROLQ x y) 24036 for { 24037 _ = v.Args[1] 24038 v_0 := v.Args[0] 24039 if v_0.Op != OpAMD64ANDQ { 24040 break 24041 } 24042 _ = v_0.Args[1] 24043 v_0_0 := v_0.Args[0] 24044 if v_0_0.Op != OpAMD64SBBQcarrymask { 24045 break 24046 } 24047 v_0_0_0 := v_0_0.Args[0] 24048 if v_0_0_0.Op != OpAMD64CMPLconst { 24049 break 24050 } 24051 if v_0_0_0.AuxInt != 64 { 24052 break 24053 } 24054 v_0_0_0_0 := v_0_0_0.Args[0] 24055 if v_0_0_0_0.Op != OpAMD64NEGL { 24056 break 24057 } 24058 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24059 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 24060 break 24061 } 24062 if v_0_0_0_0_0.AuxInt != -64 { 24063 break 24064 } 24065 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24066 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 24067 break 24068 } 24069 if v_0_0_0_0_0_0.AuxInt != 63 { 24070 break 24071 } 24072 y := v_0_0_0_0_0_0.Args[0] 24073 v_0_1 := v_0.Args[1] 24074 if v_0_1.Op != OpAMD64SHRQ { 24075 break 24076 } 24077 _ = v_0_1.Args[1] 24078 x := v_0_1.Args[0] 24079 v_0_1_1 := v_0_1.Args[1] 24080 if v_0_1_1.Op != OpAMD64NEGL { 24081 break 24082 } 24083 if y != v_0_1_1.Args[0] { 24084 break 24085 } 24086 v_1 := v.Args[1] 24087 if v_1.Op != OpAMD64SHLQ { 24088 break 24089 } 24090 _ = v_1.Args[1] 24091 if x != v_1.Args[0] { 24092 break 24093 } 24094 if y != v_1.Args[1] { 24095 break 24096 } 24097 v.reset(OpAMD64ROLQ) 24098 v.AddArg(x) 24099 v.AddArg(y) 24100 return true 24101 } 24102 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) 24103 // cond: 24104 // result: (RORQ x y) 24105 for { 24106 _ = v.Args[1] 24107 v_0 := v.Args[0] 24108 if v_0.Op != OpAMD64SHRQ { 24109 break 24110 } 24111 _ = v_0.Args[1] 24112 x := v_0.Args[0] 24113 y := v_0.Args[1] 24114 v_1 := v.Args[1] 24115 if v_1.Op != OpAMD64ANDQ { 24116 break 24117 } 24118 _ = v_1.Args[1] 24119 v_1_0 := v_1.Args[0] 24120 if v_1_0.Op != OpAMD64SHLQ { 24121 break 24122 } 24123 _ = v_1_0.Args[1] 24124 if x != v_1_0.Args[0] { 24125 break 24126 } 24127 v_1_0_1 := v_1_0.Args[1] 24128 if v_1_0_1.Op != OpAMD64NEGQ { 24129 break 24130 } 24131 if y != v_1_0_1.Args[0] { 24132 break 24133 } 24134 v_1_1 := v_1.Args[1] 24135 if v_1_1.Op != OpAMD64SBBQcarrymask { 24136 break 24137 } 24138 v_1_1_0 := v_1_1.Args[0] 24139 if v_1_1_0.Op != OpAMD64CMPQconst { 24140 break 24141 } 24142 if v_1_1_0.AuxInt != 64 { 24143 break 24144 } 24145 v_1_1_0_0 := v_1_1_0.Args[0] 24146 if v_1_1_0_0.Op != OpAMD64NEGQ { 24147 break 24148 } 24149 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 24150 if v_1_1_0_0_0.Op 
!= OpAMD64ADDQconst { 24151 break 24152 } 24153 if v_1_1_0_0_0.AuxInt != -64 { 24154 break 24155 } 24156 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 24157 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst { 24158 break 24159 } 24160 if v_1_1_0_0_0_0.AuxInt != 63 { 24161 break 24162 } 24163 if y != v_1_1_0_0_0_0.Args[0] { 24164 break 24165 } 24166 v.reset(OpAMD64RORQ) 24167 v.AddArg(x) 24168 v.AddArg(y) 24169 return true 24170 } 24171 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y)))) 24172 // cond: 24173 // result: (RORQ x y) 24174 for { 24175 _ = v.Args[1] 24176 v_0 := v.Args[0] 24177 if v_0.Op != OpAMD64SHRQ { 24178 break 24179 } 24180 _ = v_0.Args[1] 24181 x := v_0.Args[0] 24182 y := v_0.Args[1] 24183 v_1 := v.Args[1] 24184 if v_1.Op != OpAMD64ANDQ { 24185 break 24186 } 24187 _ = v_1.Args[1] 24188 v_1_0 := v_1.Args[0] 24189 if v_1_0.Op != OpAMD64SBBQcarrymask { 24190 break 24191 } 24192 v_1_0_0 := v_1_0.Args[0] 24193 if v_1_0_0.Op != OpAMD64CMPQconst { 24194 break 24195 } 24196 if v_1_0_0.AuxInt != 64 { 24197 break 24198 } 24199 v_1_0_0_0 := v_1_0_0.Args[0] 24200 if v_1_0_0_0.Op != OpAMD64NEGQ { 24201 break 24202 } 24203 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 24204 if v_1_0_0_0_0.Op != OpAMD64ADDQconst { 24205 break 24206 } 24207 if v_1_0_0_0_0.AuxInt != -64 { 24208 break 24209 } 24210 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 24211 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst { 24212 break 24213 } 24214 if v_1_0_0_0_0_0.AuxInt != 63 { 24215 break 24216 } 24217 if y != v_1_0_0_0_0_0.Args[0] { 24218 break 24219 } 24220 v_1_1 := v_1.Args[1] 24221 if v_1_1.Op != OpAMD64SHLQ { 24222 break 24223 } 24224 _ = v_1_1.Args[1] 24225 if x != v_1_1.Args[0] { 24226 break 24227 } 24228 v_1_1_1 := v_1_1.Args[1] 24229 if v_1_1_1.Op != OpAMD64NEGQ { 24230 break 24231 } 24232 if y != v_1_1_1.Args[0] { 24233 break 24234 } 24235 v.reset(OpAMD64RORQ) 24236 v.AddArg(x) 24237 v.AddArg(y) 24238 return true 24239 } 24240 // match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y)) 24241 // cond: 24242 // result: (RORQ x y) 24243 for { 24244 _ = v.Args[1] 24245 v_0 := v.Args[0] 24246 if v_0.Op != OpAMD64ANDQ { 24247 break 24248 } 24249 _ = v_0.Args[1] 24250 v_0_0 := v_0.Args[0] 24251 if v_0_0.Op != OpAMD64SHLQ { 24252 break 24253 } 24254 _ = v_0_0.Args[1] 24255 x := v_0_0.Args[0] 24256 v_0_0_1 := v_0_0.Args[1] 24257 if v_0_0_1.Op != OpAMD64NEGQ { 24258 break 24259 } 24260 y := v_0_0_1.Args[0] 24261 v_0_1 := v_0.Args[1] 24262 if v_0_1.Op != OpAMD64SBBQcarrymask { 24263 break 24264 } 24265 v_0_1_0 := v_0_1.Args[0] 24266 if v_0_1_0.Op != OpAMD64CMPQconst { 24267 break 24268 } 24269 if v_0_1_0.AuxInt != 64 { 24270 break 24271 } 24272 v_0_1_0_0 := v_0_1_0.Args[0] 24273 if v_0_1_0_0.Op != OpAMD64NEGQ { 24274 break 24275 } 24276 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 24277 if v_0_1_0_0_0.Op != OpAMD64ADDQconst { 24278 break 24279 } 24280 if v_0_1_0_0_0.AuxInt != -64 { 24281 break 24282 } 24283 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24284 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst { 24285 break 24286 } 24287 if v_0_1_0_0_0_0.AuxInt != 63 { 24288 break 24289 } 24290 if y != v_0_1_0_0_0_0.Args[0] { 24291 break 24292 } 24293 v_1 := v.Args[1] 24294 if v_1.Op != OpAMD64SHRQ { 24295 break 24296 } 24297 _ = v_1.Args[1] 24298 if x != v_1.Args[0] { 24299 break 24300 } 24301 if y != v_1.Args[1] { 24302 break 24303 } 24304 v.reset(OpAMD64RORQ) 24305 v.AddArg(x) 24306 v.AddArg(y) 24307 return true 24308 } 24309 // match: (ORQ (ANDQ 
(SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y)) 24310 // cond: 24311 // result: (RORQ x y) 24312 for { 24313 _ = v.Args[1] 24314 v_0 := v.Args[0] 24315 if v_0.Op != OpAMD64ANDQ { 24316 break 24317 } 24318 _ = v_0.Args[1] 24319 v_0_0 := v_0.Args[0] 24320 if v_0_0.Op != OpAMD64SBBQcarrymask { 24321 break 24322 } 24323 v_0_0_0 := v_0_0.Args[0] 24324 if v_0_0_0.Op != OpAMD64CMPQconst { 24325 break 24326 } 24327 if v_0_0_0.AuxInt != 64 { 24328 break 24329 } 24330 v_0_0_0_0 := v_0_0_0.Args[0] 24331 if v_0_0_0_0.Op != OpAMD64NEGQ { 24332 break 24333 } 24334 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24335 if v_0_0_0_0_0.Op != OpAMD64ADDQconst { 24336 break 24337 } 24338 if v_0_0_0_0_0.AuxInt != -64 { 24339 break 24340 } 24341 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24342 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst { 24343 break 24344 } 24345 if v_0_0_0_0_0_0.AuxInt != 63 { 24346 break 24347 } 24348 y := v_0_0_0_0_0_0.Args[0] 24349 v_0_1 := v_0.Args[1] 24350 if v_0_1.Op != OpAMD64SHLQ { 24351 break 24352 } 24353 _ = v_0_1.Args[1] 24354 x := v_0_1.Args[0] 24355 v_0_1_1 := v_0_1.Args[1] 24356 if v_0_1_1.Op != OpAMD64NEGQ { 24357 break 24358 } 24359 if y != v_0_1_1.Args[0] { 24360 break 24361 } 24362 v_1 := v.Args[1] 24363 if v_1.Op != OpAMD64SHRQ { 24364 break 24365 } 24366 _ = v_1.Args[1] 24367 if x != v_1.Args[0] { 24368 break 24369 } 24370 if y != v_1.Args[1] { 24371 break 24372 } 24373 v.reset(OpAMD64RORQ) 24374 v.AddArg(x) 24375 v.AddArg(y) 24376 return true 24377 } 24378 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) 24379 // cond: 24380 // result: (RORQ x y) 24381 for { 24382 _ = v.Args[1] 24383 v_0 := v.Args[0] 24384 if v_0.Op != OpAMD64SHRQ { 24385 break 24386 } 24387 _ = v_0.Args[1] 24388 x := v_0.Args[0] 24389 y := v_0.Args[1] 24390 v_1 := v.Args[1] 24391 if v_1.Op != OpAMD64ANDQ { 24392 break 24393 } 24394 _ = v_1.Args[1] 24395 v_1_0 := v_1.Args[0] 24396 if v_1_0.Op != OpAMD64SHLQ { 24397 break 24398 } 24399 _ = v_1_0.Args[1] 24400 if x != v_1_0.Args[0] { 24401 break 24402 } 24403 v_1_0_1 := v_1_0.Args[1] 24404 if v_1_0_1.Op != OpAMD64NEGL { 24405 break 24406 } 24407 if y != v_1_0_1.Args[0] { 24408 break 24409 } 24410 v_1_1 := v_1.Args[1] 24411 if v_1_1.Op != OpAMD64SBBQcarrymask { 24412 break 24413 } 24414 v_1_1_0 := v_1_1.Args[0] 24415 if v_1_1_0.Op != OpAMD64CMPLconst { 24416 break 24417 } 24418 if v_1_1_0.AuxInt != 64 { 24419 break 24420 } 24421 v_1_1_0_0 := v_1_1_0.Args[0] 24422 if v_1_1_0_0.Op != OpAMD64NEGL { 24423 break 24424 } 24425 v_1_1_0_0_0 := v_1_1_0_0.Args[0] 24426 if v_1_1_0_0_0.Op != OpAMD64ADDLconst { 24427 break 24428 } 24429 if v_1_1_0_0_0.AuxInt != -64 { 24430 break 24431 } 24432 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] 24433 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst { 24434 break 24435 } 24436 if v_1_1_0_0_0_0.AuxInt != 63 { 24437 break 24438 } 24439 if y != v_1_1_0_0_0_0.Args[0] { 24440 break 24441 } 24442 v.reset(OpAMD64RORQ) 24443 v.AddArg(x) 24444 v.AddArg(y) 24445 return true 24446 } 24447 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y)))) 24448 // cond: 24449 // result: (RORQ x y) 24450 for { 24451 _ = v.Args[1] 24452 v_0 := v.Args[0] 24453 if v_0.Op != OpAMD64SHRQ { 24454 break 24455 } 24456 _ = v_0.Args[1] 24457 x := v_0.Args[0] 24458 y := v_0.Args[1] 24459 v_1 := v.Args[1] 24460 if v_1.Op != OpAMD64ANDQ { 24461 break 24462 } 24463 _ = v_1.Args[1] 24464 v_1_0 := 
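// The permuted RORQ rules above and below all recognize one bounded-rotate
// dag. The key operand is SBBQcarrymask (CMPQconst/CMPLconst ... [64]): it
// evaluates to all ones exactly when y&63 != 0, and it zeroes the SHLQ half
// in the one case (y&63 == 0) where the two-shift decomposition of a rotate
// would otherwise contribute a stray x. Illustrative sketch, not part of the
// generated matcher: Go source along these lines is what the generic lowering
// turns into the dag matched here, and these rules collapse it back to a
// single RORQ x y:
//
//	func rotr(x uint64, y uint) uint64 {
//		// The left-shift count 64-y&63 can reach 64, so the lowering
//		// guards that shift with the carrymask seen in the patterns
//		// above; after rewriting, the whole expression is one RORQ.
//		return x>>(y&63) | x<<(64-y&63)
//	}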
v_1.Args[0] 24465 if v_1_0.Op != OpAMD64SBBQcarrymask { 24466 break 24467 } 24468 v_1_0_0 := v_1_0.Args[0] 24469 if v_1_0_0.Op != OpAMD64CMPLconst { 24470 break 24471 } 24472 if v_1_0_0.AuxInt != 64 { 24473 break 24474 } 24475 v_1_0_0_0 := v_1_0_0.Args[0] 24476 if v_1_0_0_0.Op != OpAMD64NEGL { 24477 break 24478 } 24479 v_1_0_0_0_0 := v_1_0_0_0.Args[0] 24480 if v_1_0_0_0_0.Op != OpAMD64ADDLconst { 24481 break 24482 } 24483 if v_1_0_0_0_0.AuxInt != -64 { 24484 break 24485 } 24486 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0] 24487 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst { 24488 break 24489 } 24490 if v_1_0_0_0_0_0.AuxInt != 63 { 24491 break 24492 } 24493 if y != v_1_0_0_0_0_0.Args[0] { 24494 break 24495 } 24496 v_1_1 := v_1.Args[1] 24497 if v_1_1.Op != OpAMD64SHLQ { 24498 break 24499 } 24500 _ = v_1_1.Args[1] 24501 if x != v_1_1.Args[0] { 24502 break 24503 } 24504 v_1_1_1 := v_1_1.Args[1] 24505 if v_1_1_1.Op != OpAMD64NEGL { 24506 break 24507 } 24508 if y != v_1_1_1.Args[0] { 24509 break 24510 } 24511 v.reset(OpAMD64RORQ) 24512 v.AddArg(x) 24513 v.AddArg(y) 24514 return true 24515 } 24516 // match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y)) 24517 // cond: 24518 // result: (RORQ x y) 24519 for { 24520 _ = v.Args[1] 24521 v_0 := v.Args[0] 24522 if v_0.Op != OpAMD64ANDQ { 24523 break 24524 } 24525 _ = v_0.Args[1] 24526 v_0_0 := v_0.Args[0] 24527 if v_0_0.Op != OpAMD64SHLQ { 24528 break 24529 } 24530 _ = v_0_0.Args[1] 24531 x := v_0_0.Args[0] 24532 v_0_0_1 := v_0_0.Args[1] 24533 if v_0_0_1.Op != OpAMD64NEGL { 24534 break 24535 } 24536 y := v_0_0_1.Args[0] 24537 v_0_1 := v_0.Args[1] 24538 if v_0_1.Op != OpAMD64SBBQcarrymask { 24539 break 24540 } 24541 v_0_1_0 := v_0_1.Args[0] 24542 if v_0_1_0.Op != OpAMD64CMPLconst { 24543 break 24544 } 24545 if v_0_1_0.AuxInt != 64 { 24546 break 24547 } 24548 v_0_1_0_0 := v_0_1_0.Args[0] 24549 if v_0_1_0_0.Op != OpAMD64NEGL { 24550 break 24551 } 24552 v_0_1_0_0_0 := v_0_1_0_0.Args[0] 24553 if v_0_1_0_0_0.Op != OpAMD64ADDLconst { 24554 break 24555 } 24556 if v_0_1_0_0_0.AuxInt != -64 { 24557 break 24558 } 24559 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0] 24560 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst { 24561 break 24562 } 24563 if v_0_1_0_0_0_0.AuxInt != 63 { 24564 break 24565 } 24566 if y != v_0_1_0_0_0_0.Args[0] { 24567 break 24568 } 24569 v_1 := v.Args[1] 24570 if v_1.Op != OpAMD64SHRQ { 24571 break 24572 } 24573 _ = v_1.Args[1] 24574 if x != v_1.Args[0] { 24575 break 24576 } 24577 if y != v_1.Args[1] { 24578 break 24579 } 24580 v.reset(OpAMD64RORQ) 24581 v.AddArg(x) 24582 v.AddArg(y) 24583 return true 24584 } 24585 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y)) 24586 // cond: 24587 // result: (RORQ x y) 24588 for { 24589 _ = v.Args[1] 24590 v_0 := v.Args[0] 24591 if v_0.Op != OpAMD64ANDQ { 24592 break 24593 } 24594 _ = v_0.Args[1] 24595 v_0_0 := v_0.Args[0] 24596 if v_0_0.Op != OpAMD64SBBQcarrymask { 24597 break 24598 } 24599 v_0_0_0 := v_0_0.Args[0] 24600 if v_0_0_0.Op != OpAMD64CMPLconst { 24601 break 24602 } 24603 if v_0_0_0.AuxInt != 64 { 24604 break 24605 } 24606 v_0_0_0_0 := v_0_0_0.Args[0] 24607 if v_0_0_0_0.Op != OpAMD64NEGL { 24608 break 24609 } 24610 v_0_0_0_0_0 := v_0_0_0_0.Args[0] 24611 if v_0_0_0_0_0.Op != OpAMD64ADDLconst { 24612 break 24613 } 24614 if v_0_0_0_0_0.AuxInt != -64 { 24615 break 24616 } 24617 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] 24618 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst { 24619 break 24620 
} 24621 if v_0_0_0_0_0_0.AuxInt != 63 { 24622 break 24623 } 24624 y := v_0_0_0_0_0_0.Args[0] 24625 v_0_1 := v_0.Args[1] 24626 if v_0_1.Op != OpAMD64SHLQ { 24627 break 24628 } 24629 _ = v_0_1.Args[1] 24630 x := v_0_1.Args[0] 24631 v_0_1_1 := v_0_1.Args[1] 24632 if v_0_1_1.Op != OpAMD64NEGL { 24633 break 24634 } 24635 if y != v_0_1_1.Args[0] { 24636 break 24637 } 24638 v_1 := v.Args[1] 24639 if v_1.Op != OpAMD64SHRQ { 24640 break 24641 } 24642 _ = v_1.Args[1] 24643 if x != v_1.Args[0] { 24644 break 24645 } 24646 if y != v_1.Args[1] { 24647 break 24648 } 24649 v.reset(OpAMD64RORQ) 24650 v.AddArg(x) 24651 v.AddArg(y) 24652 return true 24653 } 24654 return false 24655 } 24656 func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool { 24657 b := v.Block 24658 _ = b 24659 typ := &b.Func.Config.Types 24660 _ = typ 24661 // match: (ORQ x x) 24662 // cond: 24663 // result: x 24664 for { 24665 _ = v.Args[1] 24666 x := v.Args[0] 24667 if x != v.Args[1] { 24668 break 24669 } 24670 v.reset(OpCopy) 24671 v.Type = x.Type 24672 v.AddArg(x) 24673 return true 24674 } 24675 // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) 24676 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24677 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 24678 for { 24679 _ = v.Args[1] 24680 x0 := v.Args[0] 24681 if x0.Op != OpAMD64MOVBload { 24682 break 24683 } 24684 i0 := x0.AuxInt 24685 s := x0.Aux 24686 _ = x0.Args[1] 24687 p := x0.Args[0] 24688 mem := x0.Args[1] 24689 sh := v.Args[1] 24690 if sh.Op != OpAMD64SHLQconst { 24691 break 24692 } 24693 if sh.AuxInt != 8 { 24694 break 24695 } 24696 x1 := sh.Args[0] 24697 if x1.Op != OpAMD64MOVBload { 24698 break 24699 } 24700 i1 := x1.AuxInt 24701 if x1.Aux != s { 24702 break 24703 } 24704 _ = x1.Args[1] 24705 if p != x1.Args[0] { 24706 break 24707 } 24708 if mem != x1.Args[1] { 24709 break 24710 } 24711 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24712 break 24713 } 24714 b = mergePoint(b, x0, x1) 24715 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 24716 v.reset(OpCopy) 24717 v.AddArg(v0) 24718 v0.AuxInt = i0 24719 v0.Aux = s 24720 v0.AddArg(p) 24721 v0.AddArg(mem) 24722 return true 24723 } 24724 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) 24725 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24726 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) 24727 for { 24728 _ = v.Args[1] 24729 sh := v.Args[0] 24730 if sh.Op != OpAMD64SHLQconst { 24731 break 24732 } 24733 if sh.AuxInt != 8 { 24734 break 24735 } 24736 x1 := sh.Args[0] 24737 if x1.Op != OpAMD64MOVBload { 24738 break 24739 } 24740 i1 := x1.AuxInt 24741 s := x1.Aux 24742 _ = x1.Args[1] 24743 p := x1.Args[0] 24744 mem := x1.Args[1] 24745 x0 := v.Args[1] 24746 if x0.Op != OpAMD64MOVBload { 24747 break 24748 } 24749 i0 := x0.AuxInt 24750 if x0.Aux != s { 24751 break 24752 } 24753 _ = x0.Args[1] 24754 if p != x0.Args[0] { 24755 break 24756 } 24757 if mem != x0.Args[1] { 24758 break 24759 } 24760 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24761 break 24762 } 24763 b = mergePoint(b, x0, x1) 24764 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, 
typ.UInt16) 24765 v.reset(OpCopy) 24766 v.AddArg(v0) 24767 v0.AuxInt = i0 24768 v0.Aux = s 24769 v0.AddArg(p) 24770 v0.AddArg(mem) 24771 return true 24772 } 24773 // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem))) 24774 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24775 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 24776 for { 24777 _ = v.Args[1] 24778 x0 := v.Args[0] 24779 if x0.Op != OpAMD64MOVWload { 24780 break 24781 } 24782 i0 := x0.AuxInt 24783 s := x0.Aux 24784 _ = x0.Args[1] 24785 p := x0.Args[0] 24786 mem := x0.Args[1] 24787 sh := v.Args[1] 24788 if sh.Op != OpAMD64SHLQconst { 24789 break 24790 } 24791 if sh.AuxInt != 16 { 24792 break 24793 } 24794 x1 := sh.Args[0] 24795 if x1.Op != OpAMD64MOVWload { 24796 break 24797 } 24798 i1 := x1.AuxInt 24799 if x1.Aux != s { 24800 break 24801 } 24802 _ = x1.Args[1] 24803 if p != x1.Args[0] { 24804 break 24805 } 24806 if mem != x1.Args[1] { 24807 break 24808 } 24809 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24810 break 24811 } 24812 b = mergePoint(b, x0, x1) 24813 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 24814 v.reset(OpCopy) 24815 v.AddArg(v0) 24816 v0.AuxInt = i0 24817 v0.Aux = s 24818 v0.AddArg(p) 24819 v0.AddArg(mem) 24820 return true 24821 } 24822 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) 24823 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24824 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) 24825 for { 24826 _ = v.Args[1] 24827 sh := v.Args[0] 24828 if sh.Op != OpAMD64SHLQconst { 24829 break 24830 } 24831 if sh.AuxInt != 16 { 24832 break 24833 } 24834 x1 := sh.Args[0] 24835 if x1.Op != OpAMD64MOVWload { 24836 break 24837 } 24838 i1 := x1.AuxInt 24839 s := x1.Aux 24840 _ = x1.Args[1] 24841 p := x1.Args[0] 24842 mem := x1.Args[1] 24843 x0 := v.Args[1] 24844 if x0.Op != OpAMD64MOVWload { 24845 break 24846 } 24847 i0 := x0.AuxInt 24848 if x0.Aux != s { 24849 break 24850 } 24851 _ = x0.Args[1] 24852 if p != x0.Args[0] { 24853 break 24854 } 24855 if mem != x0.Args[1] { 24856 break 24857 } 24858 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24859 break 24860 } 24861 b = mergePoint(b, x0, x1) 24862 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 24863 v.reset(OpCopy) 24864 v.AddArg(v0) 24865 v0.AuxInt = i0 24866 v0.Aux = s 24867 v0.AddArg(p) 24868 v0.AddArg(mem) 24869 return true 24870 } 24871 // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) 24872 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24873 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 24874 for { 24875 _ = v.Args[1] 24876 x0 := v.Args[0] 24877 if x0.Op != OpAMD64MOVLload { 24878 break 24879 } 24880 i0 := x0.AuxInt 24881 s := x0.Aux 24882 _ = x0.Args[1] 24883 p := x0.Args[0] 24884 mem := x0.Args[1] 24885 sh := v.Args[1] 24886 if sh.Op != OpAMD64SHLQconst { 24887 break 24888 } 24889 if sh.AuxInt != 32 { 24890 break 24891 } 24892 x1 := sh.Args[0] 24893 if x1.Op != OpAMD64MOVLload { 24894 break 24895 } 24896 i1 := 
x1.AuxInt 24897 if x1.Aux != s { 24898 break 24899 } 24900 _ = x1.Args[1] 24901 if p != x1.Args[0] { 24902 break 24903 } 24904 if mem != x1.Args[1] { 24905 break 24906 } 24907 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24908 break 24909 } 24910 b = mergePoint(b, x0, x1) 24911 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 24912 v.reset(OpCopy) 24913 v.AddArg(v0) 24914 v0.AuxInt = i0 24915 v0.Aux = s 24916 v0.AddArg(p) 24917 v0.AddArg(mem) 24918 return true 24919 } 24920 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem)) 24921 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 24922 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) 24923 for { 24924 _ = v.Args[1] 24925 sh := v.Args[0] 24926 if sh.Op != OpAMD64SHLQconst { 24927 break 24928 } 24929 if sh.AuxInt != 32 { 24930 break 24931 } 24932 x1 := sh.Args[0] 24933 if x1.Op != OpAMD64MOVLload { 24934 break 24935 } 24936 i1 := x1.AuxInt 24937 s := x1.Aux 24938 _ = x1.Args[1] 24939 p := x1.Args[0] 24940 mem := x1.Args[1] 24941 x0 := v.Args[1] 24942 if x0.Op != OpAMD64MOVLload { 24943 break 24944 } 24945 i0 := x0.AuxInt 24946 if x0.Aux != s { 24947 break 24948 } 24949 _ = x0.Args[1] 24950 if p != x0.Args[0] { 24951 break 24952 } 24953 if mem != x0.Args[1] { 24954 break 24955 } 24956 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 24957 break 24958 } 24959 b = mergePoint(b, x0, x1) 24960 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 24961 v.reset(OpCopy) 24962 v.AddArg(v0) 24963 v0.AuxInt = i0 24964 v0.Aux = s 24965 v0.AddArg(p) 24966 v0.AddArg(mem) 24967 return true 24968 } 24969 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) 24970 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 24971 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 24972 for { 24973 _ = v.Args[1] 24974 s1 := v.Args[0] 24975 if s1.Op != OpAMD64SHLQconst { 24976 break 24977 } 24978 j1 := s1.AuxInt 24979 x1 := s1.Args[0] 24980 if x1.Op != OpAMD64MOVBload { 24981 break 24982 } 24983 i1 := x1.AuxInt 24984 s := x1.Aux 24985 _ = x1.Args[1] 24986 p := x1.Args[0] 24987 mem := x1.Args[1] 24988 or := v.Args[1] 24989 if or.Op != OpAMD64ORQ { 24990 break 24991 } 24992 _ = or.Args[1] 24993 s0 := or.Args[0] 24994 if s0.Op != OpAMD64SHLQconst { 24995 break 24996 } 24997 j0 := s0.AuxInt 24998 x0 := s0.Args[0] 24999 if x0.Op != OpAMD64MOVBload { 25000 break 25001 } 25002 i0 := x0.AuxInt 25003 if x0.Aux != s { 25004 break 25005 } 25006 _ = x0.Args[1] 25007 if p != x0.Args[0] { 25008 break 25009 } 25010 if mem != x0.Args[1] { 25011 break 25012 } 25013 y := or.Args[1] 25014 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25015 break 25016 } 25017 b = mergePoint(b, x0, x1) 25018 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25019 v.reset(OpCopy) 25020 
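// The MOVBload/MOVWload/MOVLload merges in this function fuse two adjacent
// little-endian loads joined by ORQ and SHLQconst into a single wider load,
// halving the load count on each pass until one MOVQload remains. A sketch of
// source that exercises the whole chain (the function name is illustrative,
// not from this package):
//
//	func le64(b []byte) uint64 {
//		// Eight MOVBloads at offsets i0..i0+7 under SHLQconst [8]..[56]
//		// merge pairwise: bytes into words, words into longs, longs into
//		// a single MOVQload.
//		return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 |
//			uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 |
//			uint64(b[6])<<48 | uint64(b[7])<<56
//	}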
v.AddArg(v0) 25021 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25022 v1.AuxInt = j0 25023 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 25024 v2.AuxInt = i0 25025 v2.Aux = s 25026 v2.AddArg(p) 25027 v2.AddArg(mem) 25028 v1.AddArg(v2) 25029 v0.AddArg(v1) 25030 v0.AddArg(y) 25031 return true 25032 } 25033 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))) 25034 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25035 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 25036 for { 25037 _ = v.Args[1] 25038 s1 := v.Args[0] 25039 if s1.Op != OpAMD64SHLQconst { 25040 break 25041 } 25042 j1 := s1.AuxInt 25043 x1 := s1.Args[0] 25044 if x1.Op != OpAMD64MOVBload { 25045 break 25046 } 25047 i1 := x1.AuxInt 25048 s := x1.Aux 25049 _ = x1.Args[1] 25050 p := x1.Args[0] 25051 mem := x1.Args[1] 25052 or := v.Args[1] 25053 if or.Op != OpAMD64ORQ { 25054 break 25055 } 25056 _ = or.Args[1] 25057 y := or.Args[0] 25058 s0 := or.Args[1] 25059 if s0.Op != OpAMD64SHLQconst { 25060 break 25061 } 25062 j0 := s0.AuxInt 25063 x0 := s0.Args[0] 25064 if x0.Op != OpAMD64MOVBload { 25065 break 25066 } 25067 i0 := x0.AuxInt 25068 if x0.Aux != s { 25069 break 25070 } 25071 _ = x0.Args[1] 25072 if p != x0.Args[0] { 25073 break 25074 } 25075 if mem != x0.Args[1] { 25076 break 25077 } 25078 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25079 break 25080 } 25081 b = mergePoint(b, x0, x1) 25082 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25083 v.reset(OpCopy) 25084 v.AddArg(v0) 25085 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25086 v1.AuxInt = j0 25087 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 25088 v2.AuxInt = i0 25089 v2.Aux = s 25090 v2.AddArg(p) 25091 v2.AddArg(mem) 25092 v1.AddArg(v2) 25093 v0.AddArg(v1) 25094 v0.AddArg(y) 25095 return true 25096 } 25097 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 25098 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25099 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 25100 for { 25101 _ = v.Args[1] 25102 or := v.Args[0] 25103 if or.Op != OpAMD64ORQ { 25104 break 25105 } 25106 _ = or.Args[1] 25107 s0 := or.Args[0] 25108 if s0.Op != OpAMD64SHLQconst { 25109 break 25110 } 25111 j0 := s0.AuxInt 25112 x0 := s0.Args[0] 25113 if x0.Op != OpAMD64MOVBload { 25114 break 25115 } 25116 i0 := x0.AuxInt 25117 s := x0.Aux 25118 _ = x0.Args[1] 25119 p := x0.Args[0] 25120 mem := x0.Args[1] 25121 y := or.Args[1] 25122 s1 := v.Args[1] 25123 if s1.Op != OpAMD64SHLQconst { 25124 break 25125 } 25126 j1 := s1.AuxInt 25127 x1 := s1.Args[0] 25128 if x1.Op != OpAMD64MOVBload { 25129 break 25130 } 25131 i1 := x1.AuxInt 25132 if x1.Aux != s { 25133 break 25134 } 25135 _ = x1.Args[1] 25136 if p != x1.Args[0] { 25137 break 25138 } 25139 if mem != x1.Args[1] { 25140 break 25141 } 
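// The cond that follows is the template every merge rule here uses:
// i1 == i0+1 pins the two loads to adjacent bytes, j1 == j0+8 pins the
// shifts to adjacent lanes, Uses == 1 guarantees no other value still needs
// the narrow form, mergePoint(b, x0, x1) picks a block where both loads are
// available (nil means there is none, so the rule must not fire), and
// clobber invalidates the replaced values so deadcode reclaims them. A
// sketch of clobber's shape, following its definition in rewrite.go of this
// package (sketch, not a verbatim copy):
//
//	func clobber(v *Value) bool {
//		v.reset(OpInvalid)
//		// Always true, so clobber can be chained into cond with &&.
//		return true
//	}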
25142 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25143 break 25144 } 25145 b = mergePoint(b, x0, x1) 25146 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25147 v.reset(OpCopy) 25148 v.AddArg(v0) 25149 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25150 v1.AuxInt = j0 25151 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 25152 v2.AuxInt = i0 25153 v2.Aux = s 25154 v2.AddArg(p) 25155 v2.AddArg(mem) 25156 v1.AddArg(v2) 25157 v0.AddArg(v1) 25158 v0.AddArg(y) 25159 return true 25160 } 25161 return false 25162 } 25163 func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool { 25164 b := v.Block 25165 _ = b 25166 typ := &b.Func.Config.Types 25167 _ = typ 25168 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) 25169 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25170 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y) 25171 for { 25172 _ = v.Args[1] 25173 or := v.Args[0] 25174 if or.Op != OpAMD64ORQ { 25175 break 25176 } 25177 _ = or.Args[1] 25178 y := or.Args[0] 25179 s0 := or.Args[1] 25180 if s0.Op != OpAMD64SHLQconst { 25181 break 25182 } 25183 j0 := s0.AuxInt 25184 x0 := s0.Args[0] 25185 if x0.Op != OpAMD64MOVBload { 25186 break 25187 } 25188 i0 := x0.AuxInt 25189 s := x0.Aux 25190 _ = x0.Args[1] 25191 p := x0.Args[0] 25192 mem := x0.Args[1] 25193 s1 := v.Args[1] 25194 if s1.Op != OpAMD64SHLQconst { 25195 break 25196 } 25197 j1 := s1.AuxInt 25198 x1 := s1.Args[0] 25199 if x1.Op != OpAMD64MOVBload { 25200 break 25201 } 25202 i1 := x1.AuxInt 25203 if x1.Aux != s { 25204 break 25205 } 25206 _ = x1.Args[1] 25207 if p != x1.Args[0] { 25208 break 25209 } 25210 if mem != x1.Args[1] { 25211 break 25212 } 25213 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25214 break 25215 } 25216 b = mergePoint(b, x0, x1) 25217 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25218 v.reset(OpCopy) 25219 v.AddArg(v0) 25220 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25221 v1.AuxInt = j0 25222 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 25223 v2.AuxInt = i0 25224 v2.Aux = s 25225 v2.AddArg(p) 25226 v2.AddArg(mem) 25227 v1.AddArg(v2) 25228 v0.AddArg(v1) 25229 v0.AddArg(y) 25230 return true 25231 } 25232 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y)) 25233 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25234 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 25235 for { 25236 _ = v.Args[1] 25237 s1 := v.Args[0] 25238 if s1.Op != OpAMD64SHLQconst { 25239 break 25240 } 25241 j1 := s1.AuxInt 25242 x1 := s1.Args[0] 25243 if x1.Op != OpAMD64MOVWload { 25244 break 25245 } 25246 i1 := x1.AuxInt 25247 s := x1.Aux 25248 _ = 
x1.Args[1] 25249 p := x1.Args[0] 25250 mem := x1.Args[1] 25251 or := v.Args[1] 25252 if or.Op != OpAMD64ORQ { 25253 break 25254 } 25255 _ = or.Args[1] 25256 s0 := or.Args[0] 25257 if s0.Op != OpAMD64SHLQconst { 25258 break 25259 } 25260 j0 := s0.AuxInt 25261 x0 := s0.Args[0] 25262 if x0.Op != OpAMD64MOVWload { 25263 break 25264 } 25265 i0 := x0.AuxInt 25266 if x0.Aux != s { 25267 break 25268 } 25269 _ = x0.Args[1] 25270 if p != x0.Args[0] { 25271 break 25272 } 25273 if mem != x0.Args[1] { 25274 break 25275 } 25276 y := or.Args[1] 25277 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25278 break 25279 } 25280 b = mergePoint(b, x0, x1) 25281 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25282 v.reset(OpCopy) 25283 v.AddArg(v0) 25284 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25285 v1.AuxInt = j0 25286 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 25287 v2.AuxInt = i0 25288 v2.Aux = s 25289 v2.AddArg(p) 25290 v2.AddArg(mem) 25291 v1.AddArg(v2) 25292 v0.AddArg(v1) 25293 v0.AddArg(y) 25294 return true 25295 } 25296 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)))) 25297 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25298 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 25299 for { 25300 _ = v.Args[1] 25301 s1 := v.Args[0] 25302 if s1.Op != OpAMD64SHLQconst { 25303 break 25304 } 25305 j1 := s1.AuxInt 25306 x1 := s1.Args[0] 25307 if x1.Op != OpAMD64MOVWload { 25308 break 25309 } 25310 i1 := x1.AuxInt 25311 s := x1.Aux 25312 _ = x1.Args[1] 25313 p := x1.Args[0] 25314 mem := x1.Args[1] 25315 or := v.Args[1] 25316 if or.Op != OpAMD64ORQ { 25317 break 25318 } 25319 _ = or.Args[1] 25320 y := or.Args[0] 25321 s0 := or.Args[1] 25322 if s0.Op != OpAMD64SHLQconst { 25323 break 25324 } 25325 j0 := s0.AuxInt 25326 x0 := s0.Args[0] 25327 if x0.Op != OpAMD64MOVWload { 25328 break 25329 } 25330 i0 := x0.AuxInt 25331 if x0.Aux != s { 25332 break 25333 } 25334 _ = x0.Args[1] 25335 if p != x0.Args[0] { 25336 break 25337 } 25338 if mem != x0.Args[1] { 25339 break 25340 } 25341 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25342 break 25343 } 25344 b = mergePoint(b, x0, x1) 25345 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25346 v.reset(OpCopy) 25347 v.AddArg(v0) 25348 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25349 v1.AuxInt = j0 25350 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 25351 v2.AuxInt = i0 25352 v2.Aux = s 25353 v2.AddArg(p) 25354 v2.AddArg(mem) 25355 v1.AddArg(v2) 25356 v0.AddArg(v1) 25357 v0.AddArg(y) 25358 return true 25359 } 25360 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 25361 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && 
clobber(or) 25362 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 25363 for { 25364 _ = v.Args[1] 25365 or := v.Args[0] 25366 if or.Op != OpAMD64ORQ { 25367 break 25368 } 25369 _ = or.Args[1] 25370 s0 := or.Args[0] 25371 if s0.Op != OpAMD64SHLQconst { 25372 break 25373 } 25374 j0 := s0.AuxInt 25375 x0 := s0.Args[0] 25376 if x0.Op != OpAMD64MOVWload { 25377 break 25378 } 25379 i0 := x0.AuxInt 25380 s := x0.Aux 25381 _ = x0.Args[1] 25382 p := x0.Args[0] 25383 mem := x0.Args[1] 25384 y := or.Args[1] 25385 s1 := v.Args[1] 25386 if s1.Op != OpAMD64SHLQconst { 25387 break 25388 } 25389 j1 := s1.AuxInt 25390 x1 := s1.Args[0] 25391 if x1.Op != OpAMD64MOVWload { 25392 break 25393 } 25394 i1 := x1.AuxInt 25395 if x1.Aux != s { 25396 break 25397 } 25398 _ = x1.Args[1] 25399 if p != x1.Args[0] { 25400 break 25401 } 25402 if mem != x1.Args[1] { 25403 break 25404 } 25405 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25406 break 25407 } 25408 b = mergePoint(b, x0, x1) 25409 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25410 v.reset(OpCopy) 25411 v.AddArg(v0) 25412 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25413 v1.AuxInt = j0 25414 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 25415 v2.AuxInt = i0 25416 v2.Aux = s 25417 v2.AddArg(p) 25418 v2.AddArg(mem) 25419 v1.AddArg(v2) 25420 v0.AddArg(v1) 25421 v0.AddArg(y) 25422 return true 25423 } 25424 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) 25425 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 25426 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y) 25427 for { 25428 _ = v.Args[1] 25429 or := v.Args[0] 25430 if or.Op != OpAMD64ORQ { 25431 break 25432 } 25433 _ = or.Args[1] 25434 y := or.Args[0] 25435 s0 := or.Args[1] 25436 if s0.Op != OpAMD64SHLQconst { 25437 break 25438 } 25439 j0 := s0.AuxInt 25440 x0 := s0.Args[0] 25441 if x0.Op != OpAMD64MOVWload { 25442 break 25443 } 25444 i0 := x0.AuxInt 25445 s := x0.Aux 25446 _ = x0.Args[1] 25447 p := x0.Args[0] 25448 mem := x0.Args[1] 25449 s1 := v.Args[1] 25450 if s1.Op != OpAMD64SHLQconst { 25451 break 25452 } 25453 j1 := s1.AuxInt 25454 x1 := s1.Args[0] 25455 if x1.Op != OpAMD64MOVWload { 25456 break 25457 } 25458 i1 := x1.AuxInt 25459 if x1.Aux != s { 25460 break 25461 } 25462 _ = x1.Args[1] 25463 if p != x1.Args[0] { 25464 break 25465 } 25466 if mem != x1.Args[1] { 25467 break 25468 } 25469 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 25470 break 25471 } 25472 b = mergePoint(b, x0, x1) 25473 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 25474 v.reset(OpCopy) 25475 v.AddArg(v0) 25476 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 25477 v1.AuxInt = j0 25478 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 25479 v2.AuxInt = i0 25480 v2.Aux = s 25481 v2.AddArg(p) 25482 v2.AddArg(mem) 25483 v1.AddArg(v2) 25484 v0.AddArg(v1) 25485 v0.AddArg(y) 25486 return 
true 25487 } 25488 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 25489 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25490 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25491 for { 25492 _ = v.Args[1] 25493 x0 := v.Args[0] 25494 if x0.Op != OpAMD64MOVBloadidx1 { 25495 break 25496 } 25497 i0 := x0.AuxInt 25498 s := x0.Aux 25499 _ = x0.Args[2] 25500 p := x0.Args[0] 25501 idx := x0.Args[1] 25502 mem := x0.Args[2] 25503 sh := v.Args[1] 25504 if sh.Op != OpAMD64SHLQconst { 25505 break 25506 } 25507 if sh.AuxInt != 8 { 25508 break 25509 } 25510 x1 := sh.Args[0] 25511 if x1.Op != OpAMD64MOVBloadidx1 { 25512 break 25513 } 25514 i1 := x1.AuxInt 25515 if x1.Aux != s { 25516 break 25517 } 25518 _ = x1.Args[2] 25519 if p != x1.Args[0] { 25520 break 25521 } 25522 if idx != x1.Args[1] { 25523 break 25524 } 25525 if mem != x1.Args[2] { 25526 break 25527 } 25528 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25529 break 25530 } 25531 b = mergePoint(b, x0, x1) 25532 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25533 v.reset(OpCopy) 25534 v.AddArg(v0) 25535 v0.AuxInt = i0 25536 v0.Aux = s 25537 v0.AddArg(p) 25538 v0.AddArg(idx) 25539 v0.AddArg(mem) 25540 return true 25541 } 25542 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) 25543 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25544 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25545 for { 25546 _ = v.Args[1] 25547 x0 := v.Args[0] 25548 if x0.Op != OpAMD64MOVBloadidx1 { 25549 break 25550 } 25551 i0 := x0.AuxInt 25552 s := x0.Aux 25553 _ = x0.Args[2] 25554 idx := x0.Args[0] 25555 p := x0.Args[1] 25556 mem := x0.Args[2] 25557 sh := v.Args[1] 25558 if sh.Op != OpAMD64SHLQconst { 25559 break 25560 } 25561 if sh.AuxInt != 8 { 25562 break 25563 } 25564 x1 := sh.Args[0] 25565 if x1.Op != OpAMD64MOVBloadidx1 { 25566 break 25567 } 25568 i1 := x1.AuxInt 25569 if x1.Aux != s { 25570 break 25571 } 25572 _ = x1.Args[2] 25573 if p != x1.Args[0] { 25574 break 25575 } 25576 if idx != x1.Args[1] { 25577 break 25578 } 25579 if mem != x1.Args[2] { 25580 break 25581 } 25582 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25583 break 25584 } 25585 b = mergePoint(b, x0, x1) 25586 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25587 v.reset(OpCopy) 25588 v.AddArg(v0) 25589 v0.AuxInt = i0 25590 v0.Aux = s 25591 v0.AddArg(p) 25592 v0.AddArg(idx) 25593 v0.AddArg(mem) 25594 return true 25595 } 25596 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 25597 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25598 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25599 for { 25600 _ = v.Args[1] 25601 x0 := v.Args[0] 25602 if x0.Op != OpAMD64MOVBloadidx1 { 25603 break 25604 } 25605 i0 := x0.AuxInt 25606 s := x0.Aux 25607 _ = x0.Args[2] 25608 p := x0.Args[0] 25609 idx := x0.Args[1] 25610 mem := x0.Args[2] 25611 sh := v.Args[1] 25612 if sh.Op 
!= OpAMD64SHLQconst { 25613 break 25614 } 25615 if sh.AuxInt != 8 { 25616 break 25617 } 25618 x1 := sh.Args[0] 25619 if x1.Op != OpAMD64MOVBloadidx1 { 25620 break 25621 } 25622 i1 := x1.AuxInt 25623 if x1.Aux != s { 25624 break 25625 } 25626 _ = x1.Args[2] 25627 if idx != x1.Args[0] { 25628 break 25629 } 25630 if p != x1.Args[1] { 25631 break 25632 } 25633 if mem != x1.Args[2] { 25634 break 25635 } 25636 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25637 break 25638 } 25639 b = mergePoint(b, x0, x1) 25640 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25641 v.reset(OpCopy) 25642 v.AddArg(v0) 25643 v0.AuxInt = i0 25644 v0.Aux = s 25645 v0.AddArg(p) 25646 v0.AddArg(idx) 25647 v0.AddArg(mem) 25648 return true 25649 } 25650 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) 25651 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25652 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25653 for { 25654 _ = v.Args[1] 25655 x0 := v.Args[0] 25656 if x0.Op != OpAMD64MOVBloadidx1 { 25657 break 25658 } 25659 i0 := x0.AuxInt 25660 s := x0.Aux 25661 _ = x0.Args[2] 25662 idx := x0.Args[0] 25663 p := x0.Args[1] 25664 mem := x0.Args[2] 25665 sh := v.Args[1] 25666 if sh.Op != OpAMD64SHLQconst { 25667 break 25668 } 25669 if sh.AuxInt != 8 { 25670 break 25671 } 25672 x1 := sh.Args[0] 25673 if x1.Op != OpAMD64MOVBloadidx1 { 25674 break 25675 } 25676 i1 := x1.AuxInt 25677 if x1.Aux != s { 25678 break 25679 } 25680 _ = x1.Args[2] 25681 if idx != x1.Args[0] { 25682 break 25683 } 25684 if p != x1.Args[1] { 25685 break 25686 } 25687 if mem != x1.Args[2] { 25688 break 25689 } 25690 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25691 break 25692 } 25693 b = mergePoint(b, x0, x1) 25694 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25695 v.reset(OpCopy) 25696 v.AddArg(v0) 25697 v0.AuxInt = i0 25698 v0.Aux = s 25699 v0.AddArg(p) 25700 v0.AddArg(idx) 25701 v0.AddArg(mem) 25702 return true 25703 } 25704 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 25705 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25706 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25707 for { 25708 _ = v.Args[1] 25709 sh := v.Args[0] 25710 if sh.Op != OpAMD64SHLQconst { 25711 break 25712 } 25713 if sh.AuxInt != 8 { 25714 break 25715 } 25716 x1 := sh.Args[0] 25717 if x1.Op != OpAMD64MOVBloadidx1 { 25718 break 25719 } 25720 i1 := x1.AuxInt 25721 s := x1.Aux 25722 _ = x1.Args[2] 25723 p := x1.Args[0] 25724 idx := x1.Args[1] 25725 mem := x1.Args[2] 25726 x0 := v.Args[1] 25727 if x0.Op != OpAMD64MOVBloadidx1 { 25728 break 25729 } 25730 i0 := x0.AuxInt 25731 if x0.Aux != s { 25732 break 25733 } 25734 _ = x0.Args[2] 25735 if p != x0.Args[0] { 25736 break 25737 } 25738 if idx != x0.Args[1] { 25739 break 25740 } 25741 if mem != x0.Args[2] { 25742 break 25743 } 25744 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25745 break 25746 } 25747 b = mergePoint(b, x0, x1) 25748 v0 := b.NewValue0(v.Pos, 
OpAMD64MOVWloadidx1, v.Type) 25749 v.reset(OpCopy) 25750 v.AddArg(v0) 25751 v0.AuxInt = i0 25752 v0.Aux = s 25753 v0.AddArg(p) 25754 v0.AddArg(idx) 25755 v0.AddArg(mem) 25756 return true 25757 } 25758 return false 25759 } 25760 func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool { 25761 b := v.Block 25762 _ = b 25763 typ := &b.Func.Config.Types 25764 _ = typ 25765 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) 25766 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25767 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25768 for { 25769 _ = v.Args[1] 25770 sh := v.Args[0] 25771 if sh.Op != OpAMD64SHLQconst { 25772 break 25773 } 25774 if sh.AuxInt != 8 { 25775 break 25776 } 25777 x1 := sh.Args[0] 25778 if x1.Op != OpAMD64MOVBloadidx1 { 25779 break 25780 } 25781 i1 := x1.AuxInt 25782 s := x1.Aux 25783 _ = x1.Args[2] 25784 idx := x1.Args[0] 25785 p := x1.Args[1] 25786 mem := x1.Args[2] 25787 x0 := v.Args[1] 25788 if x0.Op != OpAMD64MOVBloadidx1 { 25789 break 25790 } 25791 i0 := x0.AuxInt 25792 if x0.Aux != s { 25793 break 25794 } 25795 _ = x0.Args[2] 25796 if p != x0.Args[0] { 25797 break 25798 } 25799 if idx != x0.Args[1] { 25800 break 25801 } 25802 if mem != x0.Args[2] { 25803 break 25804 } 25805 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25806 break 25807 } 25808 b = mergePoint(b, x0, x1) 25809 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25810 v.reset(OpCopy) 25811 v.AddArg(v0) 25812 v0.AuxInt = i0 25813 v0.Aux = s 25814 v0.AddArg(p) 25815 v0.AddArg(idx) 25816 v0.AddArg(mem) 25817 return true 25818 } 25819 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 25820 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25821 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25822 for { 25823 _ = v.Args[1] 25824 sh := v.Args[0] 25825 if sh.Op != OpAMD64SHLQconst { 25826 break 25827 } 25828 if sh.AuxInt != 8 { 25829 break 25830 } 25831 x1 := sh.Args[0] 25832 if x1.Op != OpAMD64MOVBloadidx1 { 25833 break 25834 } 25835 i1 := x1.AuxInt 25836 s := x1.Aux 25837 _ = x1.Args[2] 25838 p := x1.Args[0] 25839 idx := x1.Args[1] 25840 mem := x1.Args[2] 25841 x0 := v.Args[1] 25842 if x0.Op != OpAMD64MOVBloadidx1 { 25843 break 25844 } 25845 i0 := x0.AuxInt 25846 if x0.Aux != s { 25847 break 25848 } 25849 _ = x0.Args[2] 25850 if idx != x0.Args[0] { 25851 break 25852 } 25853 if p != x0.Args[1] { 25854 break 25855 } 25856 if mem != x0.Args[2] { 25857 break 25858 } 25859 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25860 break 25861 } 25862 b = mergePoint(b, x0, x1) 25863 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25864 v.reset(OpCopy) 25865 v.AddArg(v0) 25866 v0.AuxInt = i0 25867 v0.Aux = s 25868 v0.AddArg(p) 25869 v0.AddArg(idx) 25870 v0.AddArg(mem) 25871 return true 25872 } 25873 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) 25874 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25875 // 
result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem) 25876 for { 25877 _ = v.Args[1] 25878 sh := v.Args[0] 25879 if sh.Op != OpAMD64SHLQconst { 25880 break 25881 } 25882 if sh.AuxInt != 8 { 25883 break 25884 } 25885 x1 := sh.Args[0] 25886 if x1.Op != OpAMD64MOVBloadidx1 { 25887 break 25888 } 25889 i1 := x1.AuxInt 25890 s := x1.Aux 25891 _ = x1.Args[2] 25892 idx := x1.Args[0] 25893 p := x1.Args[1] 25894 mem := x1.Args[2] 25895 x0 := v.Args[1] 25896 if x0.Op != OpAMD64MOVBloadidx1 { 25897 break 25898 } 25899 i0 := x0.AuxInt 25900 if x0.Aux != s { 25901 break 25902 } 25903 _ = x0.Args[2] 25904 if idx != x0.Args[0] { 25905 break 25906 } 25907 if p != x0.Args[1] { 25908 break 25909 } 25910 if mem != x0.Args[2] { 25911 break 25912 } 25913 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25914 break 25915 } 25916 b = mergePoint(b, x0, x1) 25917 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) 25918 v.reset(OpCopy) 25919 v.AddArg(v0) 25920 v0.AuxInt = i0 25921 v0.Aux = s 25922 v0.AddArg(p) 25923 v0.AddArg(idx) 25924 v0.AddArg(mem) 25925 return true 25926 } 25927 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 25928 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25929 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 25930 for { 25931 _ = v.Args[1] 25932 x0 := v.Args[0] 25933 if x0.Op != OpAMD64MOVWloadidx1 { 25934 break 25935 } 25936 i0 := x0.AuxInt 25937 s := x0.Aux 25938 _ = x0.Args[2] 25939 p := x0.Args[0] 25940 idx := x0.Args[1] 25941 mem := x0.Args[2] 25942 sh := v.Args[1] 25943 if sh.Op != OpAMD64SHLQconst { 25944 break 25945 } 25946 if sh.AuxInt != 16 { 25947 break 25948 } 25949 x1 := sh.Args[0] 25950 if x1.Op != OpAMD64MOVWloadidx1 { 25951 break 25952 } 25953 i1 := x1.AuxInt 25954 if x1.Aux != s { 25955 break 25956 } 25957 _ = x1.Args[2] 25958 if p != x1.Args[0] { 25959 break 25960 } 25961 if idx != x1.Args[1] { 25962 break 25963 } 25964 if mem != x1.Args[2] { 25965 break 25966 } 25967 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 25968 break 25969 } 25970 b = mergePoint(b, x0, x1) 25971 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 25972 v.reset(OpCopy) 25973 v.AddArg(v0) 25974 v0.AuxInt = i0 25975 v0.Aux = s 25976 v0.AddArg(p) 25977 v0.AddArg(idx) 25978 v0.AddArg(mem) 25979 return true 25980 } 25981 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 25982 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 25983 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 25984 for { 25985 _ = v.Args[1] 25986 x0 := v.Args[0] 25987 if x0.Op != OpAMD64MOVWloadidx1 { 25988 break 25989 } 25990 i0 := x0.AuxInt 25991 s := x0.Aux 25992 _ = x0.Args[2] 25993 idx := x0.Args[0] 25994 p := x0.Args[1] 25995 mem := x0.Args[2] 25996 sh := v.Args[1] 25997 if sh.Op != OpAMD64SHLQconst { 25998 break 25999 } 26000 if sh.AuxInt != 16 { 26001 break 26002 } 26003 x1 := sh.Args[0] 26004 if x1.Op != OpAMD64MOVWloadidx1 { 26005 break 26006 } 26007 i1 := x1.AuxInt 26008 if x1.Aux != s { 26009 break 26010 } 26011 _ = x1.Args[2] 26012 if p != x1.Args[0] { 26013 break 
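// MOVBloadidx1 and MOVWloadidx1 accept (p, idx) in either order, and ORQ is
// itself commutative, so the generator emits one rule per argument
// permutation; every permutation normalizes to the same result with p before
// idx. Illustrative source for the indexed variants (names are examples, not
// identifiers from this file):
//
//	func le32At(b []byte, i int) uint32 {
//		// Indexing b[i] yields MOVBloadidx1 loads; whichever operand
//		// order earlier passes produced, one of the permuted rules
//		// matches and fuses the pair into a single wider loadidx1.
//		return uint32(b[i]) | uint32(b[i+1])<<8 |
//			uint32(b[i+2])<<16 | uint32(b[i+3])<<24
//	}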
26014 } 26015 if idx != x1.Args[1] { 26016 break 26017 } 26018 if mem != x1.Args[2] { 26019 break 26020 } 26021 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26022 break 26023 } 26024 b = mergePoint(b, x0, x1) 26025 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26026 v.reset(OpCopy) 26027 v.AddArg(v0) 26028 v0.AuxInt = i0 26029 v0.Aux = s 26030 v0.AddArg(p) 26031 v0.AddArg(idx) 26032 v0.AddArg(mem) 26033 return true 26034 } 26035 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 26036 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26037 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 26038 for { 26039 _ = v.Args[1] 26040 x0 := v.Args[0] 26041 if x0.Op != OpAMD64MOVWloadidx1 { 26042 break 26043 } 26044 i0 := x0.AuxInt 26045 s := x0.Aux 26046 _ = x0.Args[2] 26047 p := x0.Args[0] 26048 idx := x0.Args[1] 26049 mem := x0.Args[2] 26050 sh := v.Args[1] 26051 if sh.Op != OpAMD64SHLQconst { 26052 break 26053 } 26054 if sh.AuxInt != 16 { 26055 break 26056 } 26057 x1 := sh.Args[0] 26058 if x1.Op != OpAMD64MOVWloadidx1 { 26059 break 26060 } 26061 i1 := x1.AuxInt 26062 if x1.Aux != s { 26063 break 26064 } 26065 _ = x1.Args[2] 26066 if idx != x1.Args[0] { 26067 break 26068 } 26069 if p != x1.Args[1] { 26070 break 26071 } 26072 if mem != x1.Args[2] { 26073 break 26074 } 26075 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26076 break 26077 } 26078 b = mergePoint(b, x0, x1) 26079 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26080 v.reset(OpCopy) 26081 v.AddArg(v0) 26082 v0.AuxInt = i0 26083 v0.Aux = s 26084 v0.AddArg(p) 26085 v0.AddArg(idx) 26086 v0.AddArg(mem) 26087 return true 26088 } 26089 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 26090 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26091 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 26092 for { 26093 _ = v.Args[1] 26094 x0 := v.Args[0] 26095 if x0.Op != OpAMD64MOVWloadidx1 { 26096 break 26097 } 26098 i0 := x0.AuxInt 26099 s := x0.Aux 26100 _ = x0.Args[2] 26101 idx := x0.Args[0] 26102 p := x0.Args[1] 26103 mem := x0.Args[2] 26104 sh := v.Args[1] 26105 if sh.Op != OpAMD64SHLQconst { 26106 break 26107 } 26108 if sh.AuxInt != 16 { 26109 break 26110 } 26111 x1 := sh.Args[0] 26112 if x1.Op != OpAMD64MOVWloadidx1 { 26113 break 26114 } 26115 i1 := x1.AuxInt 26116 if x1.Aux != s { 26117 break 26118 } 26119 _ = x1.Args[2] 26120 if idx != x1.Args[0] { 26121 break 26122 } 26123 if p != x1.Args[1] { 26124 break 26125 } 26126 if mem != x1.Args[2] { 26127 break 26128 } 26129 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26130 break 26131 } 26132 b = mergePoint(b, x0, x1) 26133 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26134 v.reset(OpCopy) 26135 v.AddArg(v0) 26136 v0.AuxInt = i0 26137 v0.Aux = s 26138 v0.AddArg(p) 26139 v0.AddArg(idx) 26140 v0.AddArg(mem) 26141 return true 26142 } 26143 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p 
idx mem)) 26144 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26145 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 26146 for { 26147 _ = v.Args[1] 26148 sh := v.Args[0] 26149 if sh.Op != OpAMD64SHLQconst { 26150 break 26151 } 26152 if sh.AuxInt != 16 { 26153 break 26154 } 26155 x1 := sh.Args[0] 26156 if x1.Op != OpAMD64MOVWloadidx1 { 26157 break 26158 } 26159 i1 := x1.AuxInt 26160 s := x1.Aux 26161 _ = x1.Args[2] 26162 p := x1.Args[0] 26163 idx := x1.Args[1] 26164 mem := x1.Args[2] 26165 x0 := v.Args[1] 26166 if x0.Op != OpAMD64MOVWloadidx1 { 26167 break 26168 } 26169 i0 := x0.AuxInt 26170 if x0.Aux != s { 26171 break 26172 } 26173 _ = x0.Args[2] 26174 if p != x0.Args[0] { 26175 break 26176 } 26177 if idx != x0.Args[1] { 26178 break 26179 } 26180 if mem != x0.Args[2] { 26181 break 26182 } 26183 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26184 break 26185 } 26186 b = mergePoint(b, x0, x1) 26187 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26188 v.reset(OpCopy) 26189 v.AddArg(v0) 26190 v0.AuxInt = i0 26191 v0.Aux = s 26192 v0.AddArg(p) 26193 v0.AddArg(idx) 26194 v0.AddArg(mem) 26195 return true 26196 } 26197 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) 26198 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26199 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 26200 for { 26201 _ = v.Args[1] 26202 sh := v.Args[0] 26203 if sh.Op != OpAMD64SHLQconst { 26204 break 26205 } 26206 if sh.AuxInt != 16 { 26207 break 26208 } 26209 x1 := sh.Args[0] 26210 if x1.Op != OpAMD64MOVWloadidx1 { 26211 break 26212 } 26213 i1 := x1.AuxInt 26214 s := x1.Aux 26215 _ = x1.Args[2] 26216 idx := x1.Args[0] 26217 p := x1.Args[1] 26218 mem := x1.Args[2] 26219 x0 := v.Args[1] 26220 if x0.Op != OpAMD64MOVWloadidx1 { 26221 break 26222 } 26223 i0 := x0.AuxInt 26224 if x0.Aux != s { 26225 break 26226 } 26227 _ = x0.Args[2] 26228 if p != x0.Args[0] { 26229 break 26230 } 26231 if idx != x0.Args[1] { 26232 break 26233 } 26234 if mem != x0.Args[2] { 26235 break 26236 } 26237 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26238 break 26239 } 26240 b = mergePoint(b, x0, x1) 26241 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26242 v.reset(OpCopy) 26243 v.AddArg(v0) 26244 v0.AuxInt = i0 26245 v0.Aux = s 26246 v0.AddArg(p) 26247 v0.AddArg(idx) 26248 v0.AddArg(mem) 26249 return true 26250 } 26251 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 26252 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26253 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 26254 for { 26255 _ = v.Args[1] 26256 sh := v.Args[0] 26257 if sh.Op != OpAMD64SHLQconst { 26258 break 26259 } 26260 if sh.AuxInt != 16 { 26261 break 26262 } 26263 x1 := sh.Args[0] 26264 if x1.Op != OpAMD64MOVWloadidx1 { 26265 break 26266 } 26267 i1 := x1.AuxInt 26268 s := x1.Aux 26269 _ = x1.Args[2] 26270 p := x1.Args[0] 26271 idx := x1.Args[1] 26272 mem := x1.Args[2] 26273 x0 := v.Args[1] 26274 if x0.Op != 
OpAMD64MOVWloadidx1 { 26275 break 26276 } 26277 i0 := x0.AuxInt 26278 if x0.Aux != s { 26279 break 26280 } 26281 _ = x0.Args[2] 26282 if idx != x0.Args[0] { 26283 break 26284 } 26285 if p != x0.Args[1] { 26286 break 26287 } 26288 if mem != x0.Args[2] { 26289 break 26290 } 26291 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26292 break 26293 } 26294 b = mergePoint(b, x0, x1) 26295 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26296 v.reset(OpCopy) 26297 v.AddArg(v0) 26298 v0.AuxInt = i0 26299 v0.Aux = s 26300 v0.AddArg(p) 26301 v0.AddArg(idx) 26302 v0.AddArg(mem) 26303 return true 26304 } 26305 return false 26306 } 26307 func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool { 26308 b := v.Block 26309 _ = b 26310 typ := &b.Func.Config.Types 26311 _ = typ 26312 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 26313 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26314 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) 26315 for { 26316 _ = v.Args[1] 26317 sh := v.Args[0] 26318 if sh.Op != OpAMD64SHLQconst { 26319 break 26320 } 26321 if sh.AuxInt != 16 { 26322 break 26323 } 26324 x1 := sh.Args[0] 26325 if x1.Op != OpAMD64MOVWloadidx1 { 26326 break 26327 } 26328 i1 := x1.AuxInt 26329 s := x1.Aux 26330 _ = x1.Args[2] 26331 idx := x1.Args[0] 26332 p := x1.Args[1] 26333 mem := x1.Args[2] 26334 x0 := v.Args[1] 26335 if x0.Op != OpAMD64MOVWloadidx1 { 26336 break 26337 } 26338 i0 := x0.AuxInt 26339 if x0.Aux != s { 26340 break 26341 } 26342 _ = x0.Args[2] 26343 if idx != x0.Args[0] { 26344 break 26345 } 26346 if p != x0.Args[1] { 26347 break 26348 } 26349 if mem != x0.Args[2] { 26350 break 26351 } 26352 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26353 break 26354 } 26355 b = mergePoint(b, x0, x1) 26356 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 26357 v.reset(OpCopy) 26358 v.AddArg(v0) 26359 v0.AuxInt = i0 26360 v0.Aux = s 26361 v0.AddArg(p) 26362 v0.AddArg(idx) 26363 v0.AddArg(mem) 26364 return true 26365 } 26366 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) 26367 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 26368 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) 26369 for { 26370 _ = v.Args[1] 26371 x0 := v.Args[0] 26372 if x0.Op != OpAMD64MOVLloadidx1 { 26373 break 26374 } 26375 i0 := x0.AuxInt 26376 s := x0.Aux 26377 _ = x0.Args[2] 26378 p := x0.Args[0] 26379 idx := x0.Args[1] 26380 mem := x0.Args[2] 26381 sh := v.Args[1] 26382 if sh.Op != OpAMD64SHLQconst { 26383 break 26384 } 26385 if sh.AuxInt != 32 { 26386 break 26387 } 26388 x1 := sh.Args[0] 26389 if x1.Op != OpAMD64MOVLloadidx1 { 26390 break 26391 } 26392 i1 := x1.AuxInt 26393 if x1.Aux != s { 26394 break 26395 } 26396 _ = x1.Args[2] 26397 if p != x1.Args[0] { 26398 break 26399 } 26400 if idx != x1.Args[1] { 26401 break 26402 } 26403 if mem != x1.Args[2] { 26404 break 26405 } 26406 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 26407 break 26408 } 26409 b = mergePoint(b, x0, x1) 26410 
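// rewriteValueAMD64_OpAMD64ORQ_50 continues the same list: the generator
// splits each op's rules across helpers and the dispatcher chains them with
// ||, so the first rule that fires rewrites v in place and reports true. A
// condensed sketch of how such helpers are driven (hypothetical helper name;
// the real loop lives in applyRewrite in rewrite.go and also rewrites block
// control values):
//
//	func applyValueRewrites(f *Func) {
//		for {
//			change := false
//			for _, b := range f.Blocks {
//				for _, v := range b.Values {
//					change = rewriteValueAMD64(v) || change
//				}
//			}
//			if !change {
//				break // fixed point: no rule fires anywhere
//			}
//		}
//	}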
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
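	// Editorial note, not generator output: the rules below work a level
	// down. Two byte loads (MOVBloadidx1) at adjacent offsets (i1 == i0+1),
	// shifted into adjacent byte positions of the same ORQ chain
	// (j1 == j0+8, with j0 aligned to a 16-bit boundary), collapse into a
	// single word load (MOVWloadidx1) that is shifted into place once.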
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
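	// Editorial note, not generator output: on the helpers used throughout
	// (defined in rewrite.go of this package, roughly speaking) mergePoint
	// picks a block where the combined load can be placed so that it is
	// reachable for the uses of both original loads, returning nil when no
	// such block exists, and clobber invalidates a matched value and always
	// returns true, which is what lets it be chained inside the boolean
	// condition.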
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
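	// Editorial note, not generator output: whatever operand order a variant
	// matched, the replacement is always emitted in the canonical
	// (MOVWloadidx1 [i0] {s} p idx mem) form and the unrelated term y is
	// reattached to the rebuilt ORQ, so all of these variants converge on
	// the same result value.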
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		y := or.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
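	// Editorial note, not generator output: the same merge now repeats at
	// word granularity. The rule above and the variants that follow combine
	// two MOVWloadidx1 loads two bytes apart, shifted by j0 and j0+16 with
	// j0 aligned to a 32-bit boundary, into one MOVLloadidx1.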
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
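// combineWords is an illustrative sketch, not generator output, and the name
// is hypothetical: it restates, on plain byte slices, the little-endian
// identity the surrounding ORQ rules rely on. Combining two adjacent 16-bit
// reads as lo | hi<<16 produces the same bits as one 32-bit read at the
// lower offset, which is why a matched ORQ/SHLQconst/MOVWloadidx1 tree can
// be replaced by a single MOVLloadidx1.
func combineWords(mem []byte, i0 int) uint32 {
	lo := uint32(mem[i0]) | uint32(mem[i0+1])<<8   // like MOVWloadidx1 [i0]
	hi := uint32(mem[i0+2]) | uint32(mem[i0+3])<<8 // like MOVWloadidx1 [i0+2]
	return lo | hi<<16                             // equals a 32-bit load at i0
}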
func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
break 28451 } 28452 if mem != x0.Args[2] { 28453 break 28454 } 28455 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28456 break 28457 } 28458 b = mergePoint(b, x0, x1) 28459 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28460 v.reset(OpCopy) 28461 v.AddArg(v0) 28462 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28463 v1.AuxInt = j0 28464 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28465 v2.AuxInt = i0 28466 v2.Aux = s 28467 v2.AddArg(p) 28468 v2.AddArg(idx) 28469 v2.AddArg(mem) 28470 v1.AddArg(v2) 28471 v0.AddArg(v1) 28472 v0.AddArg(y) 28473 return true 28474 } 28475 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28476 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28477 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28478 for { 28479 _ = v.Args[1] 28480 or := v.Args[0] 28481 if or.Op != OpAMD64ORQ { 28482 break 28483 } 28484 _ = or.Args[1] 28485 s0 := or.Args[0] 28486 if s0.Op != OpAMD64SHLQconst { 28487 break 28488 } 28489 j0 := s0.AuxInt 28490 x0 := s0.Args[0] 28491 if x0.Op != OpAMD64MOVWloadidx1 { 28492 break 28493 } 28494 i0 := x0.AuxInt 28495 s := x0.Aux 28496 _ = x0.Args[2] 28497 p := x0.Args[0] 28498 idx := x0.Args[1] 28499 mem := x0.Args[2] 28500 y := or.Args[1] 28501 s1 := v.Args[1] 28502 if s1.Op != OpAMD64SHLQconst { 28503 break 28504 } 28505 j1 := s1.AuxInt 28506 x1 := s1.Args[0] 28507 if x1.Op != OpAMD64MOVWloadidx1 { 28508 break 28509 } 28510 i1 := x1.AuxInt 28511 if x1.Aux != s { 28512 break 28513 } 28514 _ = x1.Args[2] 28515 if p != x1.Args[0] { 28516 break 28517 } 28518 if idx != x1.Args[1] { 28519 break 28520 } 28521 if mem != x1.Args[2] { 28522 break 28523 } 28524 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28525 break 28526 } 28527 b = mergePoint(b, x0, x1) 28528 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28529 v.reset(OpCopy) 28530 v.AddArg(v0) 28531 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28532 v1.AuxInt = j0 28533 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28534 v2.AuxInt = i0 28535 v2.Aux = s 28536 v2.AddArg(p) 28537 v2.AddArg(idx) 28538 v2.AddArg(mem) 28539 v1.AddArg(v2) 28540 v0.AddArg(v1) 28541 v0.AddArg(y) 28542 return true 28543 } 28544 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28545 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28546 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28547 for { 28548 _ = v.Args[1] 28549 or := v.Args[0] 28550 if or.Op != OpAMD64ORQ { 28551 break 28552 } 28553 _ = or.Args[1] 28554 s0 := or.Args[0] 28555 if s0.Op != 
OpAMD64SHLQconst { 28556 break 28557 } 28558 j0 := s0.AuxInt 28559 x0 := s0.Args[0] 28560 if x0.Op != OpAMD64MOVWloadidx1 { 28561 break 28562 } 28563 i0 := x0.AuxInt 28564 s := x0.Aux 28565 _ = x0.Args[2] 28566 idx := x0.Args[0] 28567 p := x0.Args[1] 28568 mem := x0.Args[2] 28569 y := or.Args[1] 28570 s1 := v.Args[1] 28571 if s1.Op != OpAMD64SHLQconst { 28572 break 28573 } 28574 j1 := s1.AuxInt 28575 x1 := s1.Args[0] 28576 if x1.Op != OpAMD64MOVWloadidx1 { 28577 break 28578 } 28579 i1 := x1.AuxInt 28580 if x1.Aux != s { 28581 break 28582 } 28583 _ = x1.Args[2] 28584 if p != x1.Args[0] { 28585 break 28586 } 28587 if idx != x1.Args[1] { 28588 break 28589 } 28590 if mem != x1.Args[2] { 28591 break 28592 } 28593 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28594 break 28595 } 28596 b = mergePoint(b, x0, x1) 28597 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28598 v.reset(OpCopy) 28599 v.AddArg(v0) 28600 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28601 v1.AuxInt = j0 28602 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28603 v2.AuxInt = i0 28604 v2.Aux = s 28605 v2.AddArg(p) 28606 v2.AddArg(idx) 28607 v2.AddArg(mem) 28608 v1.AddArg(v2) 28609 v0.AddArg(v1) 28610 v0.AddArg(y) 28611 return true 28612 } 28613 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28614 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28615 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28616 for { 28617 _ = v.Args[1] 28618 or := v.Args[0] 28619 if or.Op != OpAMD64ORQ { 28620 break 28621 } 28622 _ = or.Args[1] 28623 y := or.Args[0] 28624 s0 := or.Args[1] 28625 if s0.Op != OpAMD64SHLQconst { 28626 break 28627 } 28628 j0 := s0.AuxInt 28629 x0 := s0.Args[0] 28630 if x0.Op != OpAMD64MOVWloadidx1 { 28631 break 28632 } 28633 i0 := x0.AuxInt 28634 s := x0.Aux 28635 _ = x0.Args[2] 28636 p := x0.Args[0] 28637 idx := x0.Args[1] 28638 mem := x0.Args[2] 28639 s1 := v.Args[1] 28640 if s1.Op != OpAMD64SHLQconst { 28641 break 28642 } 28643 j1 := s1.AuxInt 28644 x1 := s1.Args[0] 28645 if x1.Op != OpAMD64MOVWloadidx1 { 28646 break 28647 } 28648 i1 := x1.AuxInt 28649 if x1.Aux != s { 28650 break 28651 } 28652 _ = x1.Args[2] 28653 if p != x1.Args[0] { 28654 break 28655 } 28656 if idx != x1.Args[1] { 28657 break 28658 } 28659 if mem != x1.Args[2] { 28660 break 28661 } 28662 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28663 break 28664 } 28665 b = mergePoint(b, x0, x1) 28666 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28667 v.reset(OpCopy) 28668 v.AddArg(v0) 28669 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28670 v1.AuxInt = j0 28671 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28672 v2.AuxInt = i0 28673 v2.Aux = s 28674 v2.AddArg(p) 28675 v2.AddArg(idx) 28676 v2.AddArg(mem) 28677 v1.AddArg(v2) 28678 v0.AddArg(v1) 28679 v0.AddArg(y) 28680 return true 28681 } 28682 // match: (ORQ or:(ORQ y 
s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) 28683 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28684 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28685 for { 28686 _ = v.Args[1] 28687 or := v.Args[0] 28688 if or.Op != OpAMD64ORQ { 28689 break 28690 } 28691 _ = or.Args[1] 28692 y := or.Args[0] 28693 s0 := or.Args[1] 28694 if s0.Op != OpAMD64SHLQconst { 28695 break 28696 } 28697 j0 := s0.AuxInt 28698 x0 := s0.Args[0] 28699 if x0.Op != OpAMD64MOVWloadidx1 { 28700 break 28701 } 28702 i0 := x0.AuxInt 28703 s := x0.Aux 28704 _ = x0.Args[2] 28705 idx := x0.Args[0] 28706 p := x0.Args[1] 28707 mem := x0.Args[2] 28708 s1 := v.Args[1] 28709 if s1.Op != OpAMD64SHLQconst { 28710 break 28711 } 28712 j1 := s1.AuxInt 28713 x1 := s1.Args[0] 28714 if x1.Op != OpAMD64MOVWloadidx1 { 28715 break 28716 } 28717 i1 := x1.AuxInt 28718 if x1.Aux != s { 28719 break 28720 } 28721 _ = x1.Args[2] 28722 if p != x1.Args[0] { 28723 break 28724 } 28725 if idx != x1.Args[1] { 28726 break 28727 } 28728 if mem != x1.Args[2] { 28729 break 28730 } 28731 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28732 break 28733 } 28734 b = mergePoint(b, x0, x1) 28735 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28736 v.reset(OpCopy) 28737 v.AddArg(v0) 28738 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28739 v1.AuxInt = j0 28740 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28741 v2.AuxInt = i0 28742 v2.Aux = s 28743 v2.AddArg(p) 28744 v2.AddArg(idx) 28745 v2.AddArg(mem) 28746 v1.AddArg(v2) 28747 v0.AddArg(v1) 28748 v0.AddArg(y) 28749 return true 28750 } 28751 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28752 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28753 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28754 for { 28755 _ = v.Args[1] 28756 or := v.Args[0] 28757 if or.Op != OpAMD64ORQ { 28758 break 28759 } 28760 _ = or.Args[1] 28761 s0 := or.Args[0] 28762 if s0.Op != OpAMD64SHLQconst { 28763 break 28764 } 28765 j0 := s0.AuxInt 28766 x0 := s0.Args[0] 28767 if x0.Op != OpAMD64MOVWloadidx1 { 28768 break 28769 } 28770 i0 := x0.AuxInt 28771 s := x0.Aux 28772 _ = x0.Args[2] 28773 p := x0.Args[0] 28774 idx := x0.Args[1] 28775 mem := x0.Args[2] 28776 y := or.Args[1] 28777 s1 := v.Args[1] 28778 if s1.Op != OpAMD64SHLQconst { 28779 break 28780 } 28781 j1 := s1.AuxInt 28782 x1 := s1.Args[0] 28783 if x1.Op != OpAMD64MOVWloadidx1 { 28784 break 28785 } 28786 i1 := x1.AuxInt 28787 if x1.Aux != s { 28788 break 28789 } 28790 _ = x1.Args[2] 28791 if idx != x1.Args[0] { 28792 break 28793 } 28794 if p != x1.Args[1] { 28795 break 28796 } 28797 if mem != x1.Args[2] { 28798 break 28799 } 28800 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && 
s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28801 break 28802 } 28803 b = mergePoint(b, x0, x1) 28804 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28805 v.reset(OpCopy) 28806 v.AddArg(v0) 28807 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28808 v1.AuxInt = j0 28809 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28810 v2.AuxInt = i0 28811 v2.Aux = s 28812 v2.AddArg(p) 28813 v2.AddArg(idx) 28814 v2.AddArg(mem) 28815 v1.AddArg(v2) 28816 v0.AddArg(v1) 28817 v0.AddArg(y) 28818 return true 28819 } 28820 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28821 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28822 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28823 for { 28824 _ = v.Args[1] 28825 or := v.Args[0] 28826 if or.Op != OpAMD64ORQ { 28827 break 28828 } 28829 _ = or.Args[1] 28830 s0 := or.Args[0] 28831 if s0.Op != OpAMD64SHLQconst { 28832 break 28833 } 28834 j0 := s0.AuxInt 28835 x0 := s0.Args[0] 28836 if x0.Op != OpAMD64MOVWloadidx1 { 28837 break 28838 } 28839 i0 := x0.AuxInt 28840 s := x0.Aux 28841 _ = x0.Args[2] 28842 idx := x0.Args[0] 28843 p := x0.Args[1] 28844 mem := x0.Args[2] 28845 y := or.Args[1] 28846 s1 := v.Args[1] 28847 if s1.Op != OpAMD64SHLQconst { 28848 break 28849 } 28850 j1 := s1.AuxInt 28851 x1 := s1.Args[0] 28852 if x1.Op != OpAMD64MOVWloadidx1 { 28853 break 28854 } 28855 i1 := x1.AuxInt 28856 if x1.Aux != s { 28857 break 28858 } 28859 _ = x1.Args[2] 28860 if idx != x1.Args[0] { 28861 break 28862 } 28863 if p != x1.Args[1] { 28864 break 28865 } 28866 if mem != x1.Args[2] { 28867 break 28868 } 28869 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28870 break 28871 } 28872 b = mergePoint(b, x0, x1) 28873 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28874 v.reset(OpCopy) 28875 v.AddArg(v0) 28876 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28877 v1.AuxInt = j0 28878 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28879 v2.AuxInt = i0 28880 v2.Aux = s 28881 v2.AddArg(p) 28882 v2.AddArg(idx) 28883 v2.AddArg(mem) 28884 v1.AddArg(v2) 28885 v0.AddArg(v1) 28886 v0.AddArg(y) 28887 return true 28888 } 28889 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28890 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28891 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28892 for { 28893 _ = v.Args[1] 28894 or := v.Args[0] 28895 if or.Op != OpAMD64ORQ { 28896 break 28897 } 28898 _ = or.Args[1] 28899 y := or.Args[0] 28900 s0 := or.Args[1] 28901 if s0.Op != OpAMD64SHLQconst { 28902 break 28903 } 28904 j0 := s0.AuxInt 28905 x0 := s0.Args[0] 28906 if x0.Op != OpAMD64MOVWloadidx1 { 28907 break 28908 } 28909 i0 := 
x0.AuxInt 28910 s := x0.Aux 28911 _ = x0.Args[2] 28912 p := x0.Args[0] 28913 idx := x0.Args[1] 28914 mem := x0.Args[2] 28915 s1 := v.Args[1] 28916 if s1.Op != OpAMD64SHLQconst { 28917 break 28918 } 28919 j1 := s1.AuxInt 28920 x1 := s1.Args[0] 28921 if x1.Op != OpAMD64MOVWloadidx1 { 28922 break 28923 } 28924 i1 := x1.AuxInt 28925 if x1.Aux != s { 28926 break 28927 } 28928 _ = x1.Args[2] 28929 if idx != x1.Args[0] { 28930 break 28931 } 28932 if p != x1.Args[1] { 28933 break 28934 } 28935 if mem != x1.Args[2] { 28936 break 28937 } 28938 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 28939 break 28940 } 28941 b = mergePoint(b, x0, x1) 28942 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 28943 v.reset(OpCopy) 28944 v.AddArg(v0) 28945 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 28946 v1.AuxInt = j0 28947 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 28948 v2.AuxInt = i0 28949 v2.Aux = s 28950 v2.AddArg(p) 28951 v2.AddArg(idx) 28952 v2.AddArg(mem) 28953 v1.AddArg(v2) 28954 v0.AddArg(v1) 28955 v0.AddArg(y) 28956 return true 28957 } 28958 return false 28959 } 28960 func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool { 28961 b := v.Block 28962 _ = b 28963 typ := &b.Func.Config.Types 28964 _ = typ 28965 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) 28966 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 28967 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) 28968 for { 28969 _ = v.Args[1] 28970 or := v.Args[0] 28971 if or.Op != OpAMD64ORQ { 28972 break 28973 } 28974 _ = or.Args[1] 28975 y := or.Args[0] 28976 s0 := or.Args[1] 28977 if s0.Op != OpAMD64SHLQconst { 28978 break 28979 } 28980 j0 := s0.AuxInt 28981 x0 := s0.Args[0] 28982 if x0.Op != OpAMD64MOVWloadidx1 { 28983 break 28984 } 28985 i0 := x0.AuxInt 28986 s := x0.Aux 28987 _ = x0.Args[2] 28988 idx := x0.Args[0] 28989 p := x0.Args[1] 28990 mem := x0.Args[2] 28991 s1 := v.Args[1] 28992 if s1.Op != OpAMD64SHLQconst { 28993 break 28994 } 28995 j1 := s1.AuxInt 28996 x1 := s1.Args[0] 28997 if x1.Op != OpAMD64MOVWloadidx1 { 28998 break 28999 } 29000 i1 := x1.AuxInt 29001 if x1.Aux != s { 29002 break 29003 } 29004 _ = x1.Args[2] 29005 if idx != x1.Args[0] { 29006 break 29007 } 29008 if p != x1.Args[1] { 29009 break 29010 } 29011 if mem != x1.Args[2] { 29012 break 29013 } 29014 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29015 break 29016 } 29017 b = mergePoint(b, x0, x1) 29018 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29019 v.reset(OpCopy) 29020 v.AddArg(v0) 29021 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29022 v1.AuxInt = j0 29023 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 29024 v2.AuxInt = i0 29025 v2.Aux = s 29026 v2.AddArg(p) 29027 v2.AddArg(idx) 29028 v2.AddArg(mem) 29029 v1.AddArg(v2) 29030 v0.AddArg(v1) 29031 v0.AddArg(y) 29032 return true 29033 } 29034 // match: (ORQ x1:(MOVBload [i1] {s} p mem) 
sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) 29035 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29036 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 29037 for { 29038 _ = v.Args[1] 29039 x1 := v.Args[0] 29040 if x1.Op != OpAMD64MOVBload { 29041 break 29042 } 29043 i1 := x1.AuxInt 29044 s := x1.Aux 29045 _ = x1.Args[1] 29046 p := x1.Args[0] 29047 mem := x1.Args[1] 29048 sh := v.Args[1] 29049 if sh.Op != OpAMD64SHLQconst { 29050 break 29051 } 29052 if sh.AuxInt != 8 { 29053 break 29054 } 29055 x0 := sh.Args[0] 29056 if x0.Op != OpAMD64MOVBload { 29057 break 29058 } 29059 i0 := x0.AuxInt 29060 if x0.Aux != s { 29061 break 29062 } 29063 _ = x0.Args[1] 29064 if p != x0.Args[0] { 29065 break 29066 } 29067 if mem != x0.Args[1] { 29068 break 29069 } 29070 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29071 break 29072 } 29073 b = mergePoint(b, x0, x1) 29074 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 29075 v.reset(OpCopy) 29076 v.AddArg(v0) 29077 v0.AuxInt = 8 29078 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29079 v1.AuxInt = i0 29080 v1.Aux = s 29081 v1.AddArg(p) 29082 v1.AddArg(mem) 29083 v0.AddArg(v1) 29084 return true 29085 } 29086 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) 29087 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29088 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem)) 29089 for { 29090 _ = v.Args[1] 29091 sh := v.Args[0] 29092 if sh.Op != OpAMD64SHLQconst { 29093 break 29094 } 29095 if sh.AuxInt != 8 { 29096 break 29097 } 29098 x0 := sh.Args[0] 29099 if x0.Op != OpAMD64MOVBload { 29100 break 29101 } 29102 i0 := x0.AuxInt 29103 s := x0.Aux 29104 _ = x0.Args[1] 29105 p := x0.Args[0] 29106 mem := x0.Args[1] 29107 x1 := v.Args[1] 29108 if x1.Op != OpAMD64MOVBload { 29109 break 29110 } 29111 i1 := x1.AuxInt 29112 if x1.Aux != s { 29113 break 29114 } 29115 _ = x1.Args[1] 29116 if p != x1.Args[0] { 29117 break 29118 } 29119 if mem != x1.Args[1] { 29120 break 29121 } 29122 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 29123 break 29124 } 29125 b = mergePoint(b, x0, x1) 29126 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 29127 v.reset(OpCopy) 29128 v.AddArg(v0) 29129 v0.AuxInt = 8 29130 v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29131 v1.AuxInt = i0 29132 v1.Aux = s 29133 v1.AddArg(p) 29134 v1.AddArg(mem) 29135 v0.AddArg(v1) 29136 return true 29137 } 29138 // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 29139 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29140 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 29141 for { 29142 _ = v.Args[1] 29143 r1 := v.Args[0] 29144 if r1.Op != OpAMD64ROLWconst { 29145 break 29146 } 29147 if r1.AuxInt != 8 { 29148 break 29149 } 29150 x1 := r1.Args[0] 29151 if x1.Op != OpAMD64MOVWload { 29152 break 29153 } 29154 i1 := x1.AuxInt 29155 s := x1.Aux 29156 _ = 
x1.Args[1] 29157 p := x1.Args[0] 29158 mem := x1.Args[1] 29159 sh := v.Args[1] 29160 if sh.Op != OpAMD64SHLQconst { 29161 break 29162 } 29163 if sh.AuxInt != 16 { 29164 break 29165 } 29166 r0 := sh.Args[0] 29167 if r0.Op != OpAMD64ROLWconst { 29168 break 29169 } 29170 if r0.AuxInt != 8 { 29171 break 29172 } 29173 x0 := r0.Args[0] 29174 if x0.Op != OpAMD64MOVWload { 29175 break 29176 } 29177 i0 := x0.AuxInt 29178 if x0.Aux != s { 29179 break 29180 } 29181 _ = x0.Args[1] 29182 if p != x0.Args[0] { 29183 break 29184 } 29185 if mem != x0.Args[1] { 29186 break 29187 } 29188 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29189 break 29190 } 29191 b = mergePoint(b, x0, x1) 29192 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29193 v.reset(OpCopy) 29194 v.AddArg(v0) 29195 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29196 v1.AuxInt = i0 29197 v1.Aux = s 29198 v1.AddArg(p) 29199 v1.AddArg(mem) 29200 v0.AddArg(v1) 29201 return true 29202 } 29203 // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) 29204 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29205 // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem)) 29206 for { 29207 _ = v.Args[1] 29208 sh := v.Args[0] 29209 if sh.Op != OpAMD64SHLQconst { 29210 break 29211 } 29212 if sh.AuxInt != 16 { 29213 break 29214 } 29215 r0 := sh.Args[0] 29216 if r0.Op != OpAMD64ROLWconst { 29217 break 29218 } 29219 if r0.AuxInt != 8 { 29220 break 29221 } 29222 x0 := r0.Args[0] 29223 if x0.Op != OpAMD64MOVWload { 29224 break 29225 } 29226 i0 := x0.AuxInt 29227 s := x0.Aux 29228 _ = x0.Args[1] 29229 p := x0.Args[0] 29230 mem := x0.Args[1] 29231 r1 := v.Args[1] 29232 if r1.Op != OpAMD64ROLWconst { 29233 break 29234 } 29235 if r1.AuxInt != 8 { 29236 break 29237 } 29238 x1 := r1.Args[0] 29239 if x1.Op != OpAMD64MOVWload { 29240 break 29241 } 29242 i1 := x1.AuxInt 29243 if x1.Aux != s { 29244 break 29245 } 29246 _ = x1.Args[1] 29247 if p != x1.Args[0] { 29248 break 29249 } 29250 if mem != x1.Args[1] { 29251 break 29252 } 29253 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29254 break 29255 } 29256 b = mergePoint(b, x0, x1) 29257 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) 29258 v.reset(OpCopy) 29259 v.AddArg(v0) 29260 v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29261 v1.AuxInt = i0 29262 v1.Aux = s 29263 v1.AddArg(p) 29264 v1.AddArg(mem) 29265 v0.AddArg(v1) 29266 return true 29267 } 29268 // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) 29269 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29270 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 29271 for { 29272 _ = v.Args[1] 29273 r1 := v.Args[0] 29274 if r1.Op != OpAMD64BSWAPL { 29275 break 29276 } 29277 x1 := r1.Args[0] 29278 if x1.Op != OpAMD64MOVLload { 29279 break 29280 } 29281 
i1 := x1.AuxInt 29282 s := x1.Aux 29283 _ = x1.Args[1] 29284 p := x1.Args[0] 29285 mem := x1.Args[1] 29286 sh := v.Args[1] 29287 if sh.Op != OpAMD64SHLQconst { 29288 break 29289 } 29290 if sh.AuxInt != 32 { 29291 break 29292 } 29293 r0 := sh.Args[0] 29294 if r0.Op != OpAMD64BSWAPL { 29295 break 29296 } 29297 x0 := r0.Args[0] 29298 if x0.Op != OpAMD64MOVLload { 29299 break 29300 } 29301 i0 := x0.AuxInt 29302 if x0.Aux != s { 29303 break 29304 } 29305 _ = x0.Args[1] 29306 if p != x0.Args[0] { 29307 break 29308 } 29309 if mem != x0.Args[1] { 29310 break 29311 } 29312 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29313 break 29314 } 29315 b = mergePoint(b, x0, x1) 29316 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29317 v.reset(OpCopy) 29318 v.AddArg(v0) 29319 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 29320 v1.AuxInt = i0 29321 v1.Aux = s 29322 v1.AddArg(p) 29323 v1.AddArg(mem) 29324 v0.AddArg(v1) 29325 return true 29326 } 29327 // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) 29328 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) 29329 // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem)) 29330 for { 29331 _ = v.Args[1] 29332 sh := v.Args[0] 29333 if sh.Op != OpAMD64SHLQconst { 29334 break 29335 } 29336 if sh.AuxInt != 32 { 29337 break 29338 } 29339 r0 := sh.Args[0] 29340 if r0.Op != OpAMD64BSWAPL { 29341 break 29342 } 29343 x0 := r0.Args[0] 29344 if x0.Op != OpAMD64MOVLload { 29345 break 29346 } 29347 i0 := x0.AuxInt 29348 s := x0.Aux 29349 _ = x0.Args[1] 29350 p := x0.Args[0] 29351 mem := x0.Args[1] 29352 r1 := v.Args[1] 29353 if r1.Op != OpAMD64BSWAPL { 29354 break 29355 } 29356 x1 := r1.Args[0] 29357 if x1.Op != OpAMD64MOVLload { 29358 break 29359 } 29360 i1 := x1.AuxInt 29361 if x1.Aux != s { 29362 break 29363 } 29364 _ = x1.Args[1] 29365 if p != x1.Args[0] { 29366 break 29367 } 29368 if mem != x1.Args[1] { 29369 break 29370 } 29371 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { 29372 break 29373 } 29374 b = mergePoint(b, x0, x1) 29375 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) 29376 v.reset(OpCopy) 29377 v.AddArg(v0) 29378 v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 29379 v1.AuxInt = i0 29380 v1.Aux = s 29381 v1.AddArg(p) 29382 v1.AddArg(mem) 29383 v0.AddArg(v1) 29384 return true 29385 } 29386 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) 29387 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29388 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29389 for { 29390 _ = v.Args[1] 29391 s0 := v.Args[0] 29392 if s0.Op != OpAMD64SHLQconst { 29393 break 29394 } 29395 j0 := s0.AuxInt 29396 x0 := s0.Args[0] 29397 if x0.Op != OpAMD64MOVBload { 29398 break 29399 } 29400 i0 
:= x0.AuxInt 29401 s := x0.Aux 29402 _ = x0.Args[1] 29403 p := x0.Args[0] 29404 mem := x0.Args[1] 29405 or := v.Args[1] 29406 if or.Op != OpAMD64ORQ { 29407 break 29408 } 29409 _ = or.Args[1] 29410 s1 := or.Args[0] 29411 if s1.Op != OpAMD64SHLQconst { 29412 break 29413 } 29414 j1 := s1.AuxInt 29415 x1 := s1.Args[0] 29416 if x1.Op != OpAMD64MOVBload { 29417 break 29418 } 29419 i1 := x1.AuxInt 29420 if x1.Aux != s { 29421 break 29422 } 29423 _ = x1.Args[1] 29424 if p != x1.Args[0] { 29425 break 29426 } 29427 if mem != x1.Args[1] { 29428 break 29429 } 29430 y := or.Args[1] 29431 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29432 break 29433 } 29434 b = mergePoint(b, x0, x1) 29435 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29436 v.reset(OpCopy) 29437 v.AddArg(v0) 29438 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29439 v1.AuxInt = j1 29440 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29441 v2.AuxInt = 8 29442 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29443 v3.AuxInt = i0 29444 v3.Aux = s 29445 v3.AddArg(p) 29446 v3.AddArg(mem) 29447 v2.AddArg(v3) 29448 v1.AddArg(v2) 29449 v0.AddArg(v1) 29450 v0.AddArg(y) 29451 return true 29452 } 29453 // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) 29454 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29455 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29456 for { 29457 _ = v.Args[1] 29458 s0 := v.Args[0] 29459 if s0.Op != OpAMD64SHLQconst { 29460 break 29461 } 29462 j0 := s0.AuxInt 29463 x0 := s0.Args[0] 29464 if x0.Op != OpAMD64MOVBload { 29465 break 29466 } 29467 i0 := x0.AuxInt 29468 s := x0.Aux 29469 _ = x0.Args[1] 29470 p := x0.Args[0] 29471 mem := x0.Args[1] 29472 or := v.Args[1] 29473 if or.Op != OpAMD64ORQ { 29474 break 29475 } 29476 _ = or.Args[1] 29477 y := or.Args[0] 29478 s1 := or.Args[1] 29479 if s1.Op != OpAMD64SHLQconst { 29480 break 29481 } 29482 j1 := s1.AuxInt 29483 x1 := s1.Args[0] 29484 if x1.Op != OpAMD64MOVBload { 29485 break 29486 } 29487 i1 := x1.AuxInt 29488 if x1.Aux != s { 29489 break 29490 } 29491 _ = x1.Args[1] 29492 if p != x1.Args[0] { 29493 break 29494 } 29495 if mem != x1.Args[1] { 29496 break 29497 } 29498 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29499 break 29500 } 29501 b = mergePoint(b, x0, x1) 29502 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29503 v.reset(OpCopy) 29504 v.AddArg(v0) 29505 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29506 v1.AuxInt = j1 29507 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29508 v2.AuxInt = 8 29509 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29510 v3.AuxInt = i0 29511 v3.Aux = s 29512 v3.AddArg(p) 29513 v3.AddArg(mem) 29514 v2.AddArg(v3) 29515 v1.AddArg(v2) 29516 v0.AddArg(v1) 29517 v0.AddArg(y) 29518 return true 29519 } 29520 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] 
x0:(MOVBload [i0] {s} p mem))) 29521 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29522 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29523 for { 29524 _ = v.Args[1] 29525 or := v.Args[0] 29526 if or.Op != OpAMD64ORQ { 29527 break 29528 } 29529 _ = or.Args[1] 29530 s1 := or.Args[0] 29531 if s1.Op != OpAMD64SHLQconst { 29532 break 29533 } 29534 j1 := s1.AuxInt 29535 x1 := s1.Args[0] 29536 if x1.Op != OpAMD64MOVBload { 29537 break 29538 } 29539 i1 := x1.AuxInt 29540 s := x1.Aux 29541 _ = x1.Args[1] 29542 p := x1.Args[0] 29543 mem := x1.Args[1] 29544 y := or.Args[1] 29545 s0 := v.Args[1] 29546 if s0.Op != OpAMD64SHLQconst { 29547 break 29548 } 29549 j0 := s0.AuxInt 29550 x0 := s0.Args[0] 29551 if x0.Op != OpAMD64MOVBload { 29552 break 29553 } 29554 i0 := x0.AuxInt 29555 if x0.Aux != s { 29556 break 29557 } 29558 _ = x0.Args[1] 29559 if p != x0.Args[0] { 29560 break 29561 } 29562 if mem != x0.Args[1] { 29563 break 29564 } 29565 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29566 break 29567 } 29568 b = mergePoint(b, x0, x1) 29569 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29570 v.reset(OpCopy) 29571 v.AddArg(v0) 29572 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29573 v1.AuxInt = j1 29574 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29575 v2.AuxInt = 8 29576 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29577 v3.AuxInt = i0 29578 v3.Aux = s 29579 v3.AddArg(p) 29580 v3.AddArg(mem) 29581 v2.AddArg(v3) 29582 v1.AddArg(v2) 29583 v0.AddArg(v1) 29584 v0.AddArg(y) 29585 return true 29586 } 29587 return false 29588 } 29589 func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool { 29590 b := v.Block 29591 _ = b 29592 typ := &b.Func.Config.Types 29593 _ = typ 29594 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) 29595 // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) 29596 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y) 29597 for { 29598 _ = v.Args[1] 29599 or := v.Args[0] 29600 if or.Op != OpAMD64ORQ { 29601 break 29602 } 29603 _ = or.Args[1] 29604 y := or.Args[0] 29605 s1 := or.Args[1] 29606 if s1.Op != OpAMD64SHLQconst { 29607 break 29608 } 29609 j1 := s1.AuxInt 29610 x1 := s1.Args[0] 29611 if x1.Op != OpAMD64MOVBload { 29612 break 29613 } 29614 i1 := x1.AuxInt 29615 s := x1.Aux 29616 _ = x1.Args[1] 29617 p := x1.Args[0] 29618 mem := x1.Args[1] 29619 s0 := v.Args[1] 29620 if s0.Op != OpAMD64SHLQconst { 29621 break 29622 } 29623 j0 := s0.AuxInt 29624 x0 := s0.Args[0] 29625 if x0.Op != OpAMD64MOVBload { 29626 break 29627 } 29628 i0 := x0.AuxInt 29629 if x0.Aux != s { 29630 break 29631 } 29632 _ = x0.Args[1] 29633 if p != x0.Args[0] { 29634 break 29635 } 29636 if mem != x0.Args[1] { 29637 break 29638 } 29639 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && 
s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { 29640 break 29641 } 29642 b = mergePoint(b, x0, x1) 29643 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29644 v.reset(OpCopy) 29645 v.AddArg(v0) 29646 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29647 v1.AuxInt = j1 29648 v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) 29649 v2.AuxInt = 8 29650 v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 29651 v3.AuxInt = i0 29652 v3.Aux = s 29653 v3.AddArg(p) 29654 v3.AddArg(mem) 29655 v2.AddArg(v3) 29656 v1.AddArg(v2) 29657 v0.AddArg(v1) 29658 v0.AddArg(y) 29659 return true 29660 } 29661 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) 29662 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29663 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29664 for { 29665 _ = v.Args[1] 29666 s0 := v.Args[0] 29667 if s0.Op != OpAMD64SHLQconst { 29668 break 29669 } 29670 j0 := s0.AuxInt 29671 r0 := s0.Args[0] 29672 if r0.Op != OpAMD64ROLWconst { 29673 break 29674 } 29675 if r0.AuxInt != 8 { 29676 break 29677 } 29678 x0 := r0.Args[0] 29679 if x0.Op != OpAMD64MOVWload { 29680 break 29681 } 29682 i0 := x0.AuxInt 29683 s := x0.Aux 29684 _ = x0.Args[1] 29685 p := x0.Args[0] 29686 mem := x0.Args[1] 29687 or := v.Args[1] 29688 if or.Op != OpAMD64ORQ { 29689 break 29690 } 29691 _ = or.Args[1] 29692 s1 := or.Args[0] 29693 if s1.Op != OpAMD64SHLQconst { 29694 break 29695 } 29696 j1 := s1.AuxInt 29697 r1 := s1.Args[0] 29698 if r1.Op != OpAMD64ROLWconst { 29699 break 29700 } 29701 if r1.AuxInt != 8 { 29702 break 29703 } 29704 x1 := r1.Args[0] 29705 if x1.Op != OpAMD64MOVWload { 29706 break 29707 } 29708 i1 := x1.AuxInt 29709 if x1.Aux != s { 29710 break 29711 } 29712 _ = x1.Args[1] 29713 if p != x1.Args[0] { 29714 break 29715 } 29716 if mem != x1.Args[1] { 29717 break 29718 } 29719 y := or.Args[1] 29720 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29721 break 29722 } 29723 b = mergePoint(b, x0, x1) 29724 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29725 v.reset(OpCopy) 29726 v.AddArg(v0) 29727 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29728 v1.AuxInt = j1 29729 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29730 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29731 v3.AuxInt = i0 29732 v3.Aux = s 29733 v3.AddArg(p) 29734 v3.AddArg(mem) 29735 v2.AddArg(v3) 29736 v1.AddArg(v2) 29737 v0.AddArg(v1) 29738 v0.AddArg(y) 29739 return true 29740 } 29741 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) 29742 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && 
clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29743 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29744 for { 29745 _ = v.Args[1] 29746 s0 := v.Args[0] 29747 if s0.Op != OpAMD64SHLQconst { 29748 break 29749 } 29750 j0 := s0.AuxInt 29751 r0 := s0.Args[0] 29752 if r0.Op != OpAMD64ROLWconst { 29753 break 29754 } 29755 if r0.AuxInt != 8 { 29756 break 29757 } 29758 x0 := r0.Args[0] 29759 if x0.Op != OpAMD64MOVWload { 29760 break 29761 } 29762 i0 := x0.AuxInt 29763 s := x0.Aux 29764 _ = x0.Args[1] 29765 p := x0.Args[0] 29766 mem := x0.Args[1] 29767 or := v.Args[1] 29768 if or.Op != OpAMD64ORQ { 29769 break 29770 } 29771 _ = or.Args[1] 29772 y := or.Args[0] 29773 s1 := or.Args[1] 29774 if s1.Op != OpAMD64SHLQconst { 29775 break 29776 } 29777 j1 := s1.AuxInt 29778 r1 := s1.Args[0] 29779 if r1.Op != OpAMD64ROLWconst { 29780 break 29781 } 29782 if r1.AuxInt != 8 { 29783 break 29784 } 29785 x1 := r1.Args[0] 29786 if x1.Op != OpAMD64MOVWload { 29787 break 29788 } 29789 i1 := x1.AuxInt 29790 if x1.Aux != s { 29791 break 29792 } 29793 _ = x1.Args[1] 29794 if p != x1.Args[0] { 29795 break 29796 } 29797 if mem != x1.Args[1] { 29798 break 29799 } 29800 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29801 break 29802 } 29803 b = mergePoint(b, x0, x1) 29804 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29805 v.reset(OpCopy) 29806 v.AddArg(v0) 29807 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29808 v1.AuxInt = j1 29809 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29810 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29811 v3.AuxInt = i0 29812 v3.Aux = s 29813 v3.AddArg(p) 29814 v3.AddArg(mem) 29815 v2.AddArg(v3) 29816 v1.AddArg(v2) 29817 v0.AddArg(v1) 29818 v0.AddArg(y) 29819 return true 29820 } 29821 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 29822 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29823 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29824 for { 29825 _ = v.Args[1] 29826 or := v.Args[0] 29827 if or.Op != OpAMD64ORQ { 29828 break 29829 } 29830 _ = or.Args[1] 29831 s1 := or.Args[0] 29832 if s1.Op != OpAMD64SHLQconst { 29833 break 29834 } 29835 j1 := s1.AuxInt 29836 r1 := s1.Args[0] 29837 if r1.Op != OpAMD64ROLWconst { 29838 break 29839 } 29840 if r1.AuxInt != 8 { 29841 break 29842 } 29843 x1 := r1.Args[0] 29844 if x1.Op != OpAMD64MOVWload { 29845 break 29846 } 29847 i1 := x1.AuxInt 29848 s := x1.Aux 29849 _ = x1.Args[1] 29850 p := x1.Args[0] 29851 mem := x1.Args[1] 29852 y := or.Args[1] 29853 s0 := v.Args[1] 29854 if s0.Op != OpAMD64SHLQconst { 29855 break 29856 } 29857 j0 := s0.AuxInt 29858 r0 := s0.Args[0] 29859 if r0.Op != OpAMD64ROLWconst { 29860 break 29861 } 29862 if r0.AuxInt != 8 { 29863 break 29864 } 29865 x0 := r0.Args[0] 29866 if x0.Op != OpAMD64MOVWload 
{ 29867 break 29868 } 29869 i0 := x0.AuxInt 29870 if x0.Aux != s { 29871 break 29872 } 29873 _ = x0.Args[1] 29874 if p != x0.Args[0] { 29875 break 29876 } 29877 if mem != x0.Args[1] { 29878 break 29879 } 29880 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29881 break 29882 } 29883 b = mergePoint(b, x0, x1) 29884 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29885 v.reset(OpCopy) 29886 v.AddArg(v0) 29887 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29888 v1.AuxInt = j1 29889 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29890 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29891 v3.AuxInt = i0 29892 v3.Aux = s 29893 v3.AddArg(p) 29894 v3.AddArg(mem) 29895 v2.AddArg(v3) 29896 v1.AddArg(v2) 29897 v0.AddArg(v1) 29898 v0.AddArg(y) 29899 return true 29900 } 29901 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 29902 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 29903 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y) 29904 for { 29905 _ = v.Args[1] 29906 or := v.Args[0] 29907 if or.Op != OpAMD64ORQ { 29908 break 29909 } 29910 _ = or.Args[1] 29911 y := or.Args[0] 29912 s1 := or.Args[1] 29913 if s1.Op != OpAMD64SHLQconst { 29914 break 29915 } 29916 j1 := s1.AuxInt 29917 r1 := s1.Args[0] 29918 if r1.Op != OpAMD64ROLWconst { 29919 break 29920 } 29921 if r1.AuxInt != 8 { 29922 break 29923 } 29924 x1 := r1.Args[0] 29925 if x1.Op != OpAMD64MOVWload { 29926 break 29927 } 29928 i1 := x1.AuxInt 29929 s := x1.Aux 29930 _ = x1.Args[1] 29931 p := x1.Args[0] 29932 mem := x1.Args[1] 29933 s0 := v.Args[1] 29934 if s0.Op != OpAMD64SHLQconst { 29935 break 29936 } 29937 j0 := s0.AuxInt 29938 r0 := s0.Args[0] 29939 if r0.Op != OpAMD64ROLWconst { 29940 break 29941 } 29942 if r0.AuxInt != 8 { 29943 break 29944 } 29945 x0 := r0.Args[0] 29946 if x0.Op != OpAMD64MOVWload { 29947 break 29948 } 29949 i0 := x0.AuxInt 29950 if x0.Aux != s { 29951 break 29952 } 29953 _ = x0.Args[1] 29954 if p != x0.Args[0] { 29955 break 29956 } 29957 if mem != x0.Args[1] { 29958 break 29959 } 29960 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 29961 break 29962 } 29963 b = mergePoint(b, x0, x1) 29964 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 29965 v.reset(OpCopy) 29966 v.AddArg(v0) 29967 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 29968 v1.AuxInt = j1 29969 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 29970 v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 29971 v3.AuxInt = i0 29972 v3.Aux = s 29973 v3.AddArg(p) 29974 v3.AddArg(mem) 29975 v2.AddArg(v3) 29976 v1.AddArg(v2) 29977 v0.AddArg(v1) 29978 v0.AddArg(y) 29979 return true 29980 } 29981 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} 
p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 29982 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 29983 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 29984 for { 29985 _ = v.Args[1] 29986 x1 := v.Args[0] 29987 if x1.Op != OpAMD64MOVBloadidx1 { 29988 break 29989 } 29990 i1 := x1.AuxInt 29991 s := x1.Aux 29992 _ = x1.Args[2] 29993 p := x1.Args[0] 29994 idx := x1.Args[1] 29995 mem := x1.Args[2] 29996 sh := v.Args[1] 29997 if sh.Op != OpAMD64SHLQconst { 29998 break 29999 } 30000 if sh.AuxInt != 8 { 30001 break 30002 } 30003 x0 := sh.Args[0] 30004 if x0.Op != OpAMD64MOVBloadidx1 { 30005 break 30006 } 30007 i0 := x0.AuxInt 30008 if x0.Aux != s { 30009 break 30010 } 30011 _ = x0.Args[2] 30012 if p != x0.Args[0] { 30013 break 30014 } 30015 if idx != x0.Args[1] { 30016 break 30017 } 30018 if mem != x0.Args[2] { 30019 break 30020 } 30021 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30022 break 30023 } 30024 b = mergePoint(b, x0, x1) 30025 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30026 v.reset(OpCopy) 30027 v.AddArg(v0) 30028 v0.AuxInt = 8 30029 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30030 v1.AuxInt = i0 30031 v1.Aux = s 30032 v1.AddArg(p) 30033 v1.AddArg(idx) 30034 v1.AddArg(mem) 30035 v0.AddArg(v1) 30036 return true 30037 } 30038 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) 30039 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30040 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30041 for { 30042 _ = v.Args[1] 30043 x1 := v.Args[0] 30044 if x1.Op != OpAMD64MOVBloadidx1 { 30045 break 30046 } 30047 i1 := x1.AuxInt 30048 s := x1.Aux 30049 _ = x1.Args[2] 30050 idx := x1.Args[0] 30051 p := x1.Args[1] 30052 mem := x1.Args[2] 30053 sh := v.Args[1] 30054 if sh.Op != OpAMD64SHLQconst { 30055 break 30056 } 30057 if sh.AuxInt != 8 { 30058 break 30059 } 30060 x0 := sh.Args[0] 30061 if x0.Op != OpAMD64MOVBloadidx1 { 30062 break 30063 } 30064 i0 := x0.AuxInt 30065 if x0.Aux != s { 30066 break 30067 } 30068 _ = x0.Args[2] 30069 if p != x0.Args[0] { 30070 break 30071 } 30072 if idx != x0.Args[1] { 30073 break 30074 } 30075 if mem != x0.Args[2] { 30076 break 30077 } 30078 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30079 break 30080 } 30081 b = mergePoint(b, x0, x1) 30082 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30083 v.reset(OpCopy) 30084 v.AddArg(v0) 30085 v0.AuxInt = 8 30086 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30087 v1.AuxInt = i0 30088 v1.Aux = s 30089 v1.AddArg(p) 30090 v1.AddArg(idx) 30091 v1.AddArg(mem) 30092 v0.AddArg(v1) 30093 return true 30094 } 30095 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30096 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30097 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30098 for { 30099 _ = v.Args[1] 30100 x1 := v.Args[0] 30101 if x1.Op != OpAMD64MOVBloadidx1 
{ 30102 break 30103 } 30104 i1 := x1.AuxInt 30105 s := x1.Aux 30106 _ = x1.Args[2] 30107 p := x1.Args[0] 30108 idx := x1.Args[1] 30109 mem := x1.Args[2] 30110 sh := v.Args[1] 30111 if sh.Op != OpAMD64SHLQconst { 30112 break 30113 } 30114 if sh.AuxInt != 8 { 30115 break 30116 } 30117 x0 := sh.Args[0] 30118 if x0.Op != OpAMD64MOVBloadidx1 { 30119 break 30120 } 30121 i0 := x0.AuxInt 30122 if x0.Aux != s { 30123 break 30124 } 30125 _ = x0.Args[2] 30126 if idx != x0.Args[0] { 30127 break 30128 } 30129 if p != x0.Args[1] { 30130 break 30131 } 30132 if mem != x0.Args[2] { 30133 break 30134 } 30135 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30136 break 30137 } 30138 b = mergePoint(b, x0, x1) 30139 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30140 v.reset(OpCopy) 30141 v.AddArg(v0) 30142 v0.AuxInt = 8 30143 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30144 v1.AuxInt = i0 30145 v1.Aux = s 30146 v1.AddArg(p) 30147 v1.AddArg(idx) 30148 v1.AddArg(mem) 30149 v0.AddArg(v1) 30150 return true 30151 } 30152 // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) 30153 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30154 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30155 for { 30156 _ = v.Args[1] 30157 x1 := v.Args[0] 30158 if x1.Op != OpAMD64MOVBloadidx1 { 30159 break 30160 } 30161 i1 := x1.AuxInt 30162 s := x1.Aux 30163 _ = x1.Args[2] 30164 idx := x1.Args[0] 30165 p := x1.Args[1] 30166 mem := x1.Args[2] 30167 sh := v.Args[1] 30168 if sh.Op != OpAMD64SHLQconst { 30169 break 30170 } 30171 if sh.AuxInt != 8 { 30172 break 30173 } 30174 x0 := sh.Args[0] 30175 if x0.Op != OpAMD64MOVBloadidx1 { 30176 break 30177 } 30178 i0 := x0.AuxInt 30179 if x0.Aux != s { 30180 break 30181 } 30182 _ = x0.Args[2] 30183 if idx != x0.Args[0] { 30184 break 30185 } 30186 if p != x0.Args[1] { 30187 break 30188 } 30189 if mem != x0.Args[2] { 30190 break 30191 } 30192 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { 30193 break 30194 } 30195 b = mergePoint(b, x0, x1) 30196 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) 30197 v.reset(OpCopy) 30198 v.AddArg(v0) 30199 v0.AuxInt = 8 30200 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16) 30201 v1.AuxInt = i0 30202 v1.Aux = s 30203 v1.AddArg(p) 30204 v1.AddArg(idx) 30205 v1.AddArg(mem) 30206 v0.AddArg(v1) 30207 return true 30208 } 30209 // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) 30210 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) 30211 // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem)) 30212 for { 30213 _ = v.Args[1] 30214 sh := v.Args[0] 30215 if sh.Op != OpAMD64SHLQconst { 30216 break 30217 } 30218 if sh.AuxInt != 8 { 30219 break 30220 } 30221 x0 := sh.Args[0] 30222 if x0.Op != OpAMD64MOVBloadidx1 { 30223 break 30224 } 30225 i0 := x0.AuxInt 30226 s := x0.Aux 30227 _ = x0.Args[2] 30228 p := x0.Args[0] 30229 idx := x0.Args[1] 30230 mem := x0.Args[2] 30231 x1 := v.Args[1] 30232 if x1.Op != OpAMD64MOVBloadidx1 { 30233 break 30234 } 30235 i1 := x1.AuxInt 30236 if x1.Aux != s { 
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
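// Illustrative sketch (hypothetical source, not part of the generated
// rules): the indexed byte-merge rules above rewrite a big-endian pair of
// byte loads into one 16-bit load plus a byte swap. Go code of roughly the
// following shape is the kind of pattern they are intended to match:
//
//	func load16be(b []byte, i int) uint16 {
//		return uint16(b[i])<<8 | uint16(b[i+1])
//	}
//
// Assuming each MOVBloadidx1 has a single use (the Uses == 1 conditions),
// the pair becomes a single MOVWloadidx1 feeding a ROLWconst [8], i.e. one
// word load plus a byte swap instead of two byte loads, a shift, and an OR.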
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
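// Illustrative sketch (hypothetical source, not part of the generated
// rules): the BSWAPL/BSWAPQ pair rules above fuse two byte-swapped loads at
// adjacent offsets into one wider byte-swapped load, continuing the cascade
// that starts from byte pairs. A big-endian read of the following shape
//
//	func load32be(b []byte, i int) uint32 {
//		return uint32(b[i])<<24 | uint32(b[i+1])<<16 | uint32(b[i+2])<<8 | uint32(b[i+3])
//	}
//
// first becomes two ROLWconst'd MOVW loads, then one BSWAPL'd MOVL load;
// two such 32-bit halves at i0 and i0+4 in turn become a single
// MOVQloadidx1 feeding a BSWAPQ. Each rule is repeated once per operand
// order because ORQ and the (p, idx) operands of an indexed load commute,
// and the rule generator emits one concrete rule per permutation.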
func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
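// Illustrative note (hypothetical, not part of the generated rules): the
// rules in this group handle partial merges inside a larger OR chain. Two
// adjacent byte loads shifted by j0 and j1 = j0-8, with j1 aligned to a
// 16-bit boundary, fold into one shifted byte-swapped word load while the
// remainder of the chain, y, is preserved:
//
//	(ORQ (SHLQconst [j0] (MOVBloadidx1 [i0] ...))
//	     (ORQ (SHLQconst [j1] (MOVBloadidx1 [i0+1] ...)) y))
//	=> (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] ...))) y)
//
// Repeated application, together with the word and long variants, is what
// allows a full 8-byte big-endian read to reduce step by step to a single
// MOVQloadidx1 feeding a BSWAPQ.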
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		y := or.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		_ = x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		mem := x1.Args[2]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		_ = x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		mem := x0.Args[2]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		y := or.Args[1]
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt
= i0 32764 v3.Aux = s 32765 v3.AddArg(p) 32766 v3.AddArg(idx) 32767 v3.AddArg(mem) 32768 v2.AddArg(v3) 32769 v1.AddArg(v2) 32770 v0.AddArg(v1) 32771 v0.AddArg(y) 32772 return true 32773 } 32774 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) 32775 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32776 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32777 for { 32778 _ = v.Args[1] 32779 s0 := v.Args[0] 32780 if s0.Op != OpAMD64SHLQconst { 32781 break 32782 } 32783 j0 := s0.AuxInt 32784 r0 := s0.Args[0] 32785 if r0.Op != OpAMD64ROLWconst { 32786 break 32787 } 32788 if r0.AuxInt != 8 { 32789 break 32790 } 32791 x0 := r0.Args[0] 32792 if x0.Op != OpAMD64MOVWloadidx1 { 32793 break 32794 } 32795 i0 := x0.AuxInt 32796 s := x0.Aux 32797 _ = x0.Args[2] 32798 idx := x0.Args[0] 32799 p := x0.Args[1] 32800 mem := x0.Args[2] 32801 or := v.Args[1] 32802 if or.Op != OpAMD64ORQ { 32803 break 32804 } 32805 _ = or.Args[1] 32806 s1 := or.Args[0] 32807 if s1.Op != OpAMD64SHLQconst { 32808 break 32809 } 32810 j1 := s1.AuxInt 32811 r1 := s1.Args[0] 32812 if r1.Op != OpAMD64ROLWconst { 32813 break 32814 } 32815 if r1.AuxInt != 8 { 32816 break 32817 } 32818 x1 := r1.Args[0] 32819 if x1.Op != OpAMD64MOVWloadidx1 { 32820 break 32821 } 32822 i1 := x1.AuxInt 32823 if x1.Aux != s { 32824 break 32825 } 32826 _ = x1.Args[2] 32827 if p != x1.Args[0] { 32828 break 32829 } 32830 if idx != x1.Args[1] { 32831 break 32832 } 32833 if mem != x1.Args[2] { 32834 break 32835 } 32836 y := or.Args[1] 32837 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32838 break 32839 } 32840 b = mergePoint(b, x0, x1) 32841 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32842 v.reset(OpCopy) 32843 v.AddArg(v0) 32844 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32845 v1.AuxInt = j1 32846 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32847 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32848 v3.AuxInt = i0 32849 v3.Aux = s 32850 v3.AddArg(p) 32851 v3.AddArg(idx) 32852 v3.AddArg(mem) 32853 v2.AddArg(v3) 32854 v1.AddArg(v2) 32855 v0.AddArg(v1) 32856 v0.AddArg(y) 32857 return true 32858 } 32859 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 32860 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32861 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32862 for { 32863 _ = v.Args[1] 32864 s0 := v.Args[0] 32865 if s0.Op != OpAMD64SHLQconst { 32866 break 32867 } 32868 j0 := s0.AuxInt 
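// Aside on the two merge families in this function: the matches above
// fire on (b[i0]<<8 | b[i0+1])<<j1 built from two one-byte indexed
// loads and replace the pair with one 16-bit load plus a byte swap,
// (ROLWconst [8] (MOVWloadidx1 ...)); their cond requires i1 == i0+1
// (adjacent addresses), j1 == j0-8 (the lower-address byte lands in the
// higher bits), and j1%16 == 0 (the halfword sits on a 16-bit boundary
// of the result). The match surrounding this comment, and those that
// follow, repeat the idea one level up, pairing two such byte-swapped
// halfwords into (BSWAPL (MOVLloadidx1 ...)). A minimal sketch of the
// 16-bit identity, illustrative only (load16BE is a hypothetical name,
// not part of this file):
//
//	func load16BE(b []byte, i int) uint16 {
//		hi := uint16(b[i])   // byte at the lower address
//		lo := uint16(b[i+1]) // byte at the higher address
//		return hi<<8 | lo    // what ROLW $8 of a little-endian 16-bit load yields
//	}
//
// The many near-identical matches differ only in operand order: ORQ and
// 1-scaled indexed addressing are commutative, so the rule generator
// spells out every permutation explicitly.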
32869 r0 := s0.Args[0] 32870 if r0.Op != OpAMD64ROLWconst { 32871 break 32872 } 32873 if r0.AuxInt != 8 { 32874 break 32875 } 32876 x0 := r0.Args[0] 32877 if x0.Op != OpAMD64MOVWloadidx1 { 32878 break 32879 } 32880 i0 := x0.AuxInt 32881 s := x0.Aux 32882 _ = x0.Args[2] 32883 p := x0.Args[0] 32884 idx := x0.Args[1] 32885 mem := x0.Args[2] 32886 or := v.Args[1] 32887 if or.Op != OpAMD64ORQ { 32888 break 32889 } 32890 _ = or.Args[1] 32891 s1 := or.Args[0] 32892 if s1.Op != OpAMD64SHLQconst { 32893 break 32894 } 32895 j1 := s1.AuxInt 32896 r1 := s1.Args[0] 32897 if r1.Op != OpAMD64ROLWconst { 32898 break 32899 } 32900 if r1.AuxInt != 8 { 32901 break 32902 } 32903 x1 := r1.Args[0] 32904 if x1.Op != OpAMD64MOVWloadidx1 { 32905 break 32906 } 32907 i1 := x1.AuxInt 32908 if x1.Aux != s { 32909 break 32910 } 32911 _ = x1.Args[2] 32912 if idx != x1.Args[0] { 32913 break 32914 } 32915 if p != x1.Args[1] { 32916 break 32917 } 32918 if mem != x1.Args[2] { 32919 break 32920 } 32921 y := or.Args[1] 32922 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 32923 break 32924 } 32925 b = mergePoint(b, x0, x1) 32926 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 32927 v.reset(OpCopy) 32928 v.AddArg(v0) 32929 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 32930 v1.AuxInt = j1 32931 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 32932 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 32933 v3.AuxInt = i0 32934 v3.Aux = s 32935 v3.AddArg(p) 32936 v3.AddArg(idx) 32937 v3.AddArg(mem) 32938 v2.AddArg(v3) 32939 v1.AddArg(v2) 32940 v0.AddArg(v1) 32941 v0.AddArg(y) 32942 return true 32943 } 32944 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) 32945 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 32946 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 32947 for { 32948 _ = v.Args[1] 32949 s0 := v.Args[0] 32950 if s0.Op != OpAMD64SHLQconst { 32951 break 32952 } 32953 j0 := s0.AuxInt 32954 r0 := s0.Args[0] 32955 if r0.Op != OpAMD64ROLWconst { 32956 break 32957 } 32958 if r0.AuxInt != 8 { 32959 break 32960 } 32961 x0 := r0.Args[0] 32962 if x0.Op != OpAMD64MOVWloadidx1 { 32963 break 32964 } 32965 i0 := x0.AuxInt 32966 s := x0.Aux 32967 _ = x0.Args[2] 32968 idx := x0.Args[0] 32969 p := x0.Args[1] 32970 mem := x0.Args[2] 32971 or := v.Args[1] 32972 if or.Op != OpAMD64ORQ { 32973 break 32974 } 32975 _ = or.Args[1] 32976 s1 := or.Args[0] 32977 if s1.Op != OpAMD64SHLQconst { 32978 break 32979 } 32980 j1 := s1.AuxInt 32981 r1 := s1.Args[0] 32982 if r1.Op != OpAMD64ROLWconst { 32983 break 32984 } 32985 if r1.AuxInt != 8 { 32986 break 32987 } 32988 x1 := r1.Args[0] 32989 if x1.Op != OpAMD64MOVWloadidx1 { 32990 break 32991 } 32992 i1 := x1.AuxInt 32993 if x1.Aux != s { 32994 break 32995 } 32996 _ = x1.Args[2] 32997 if idx != x1.Args[0] { 32998 break 32999 } 33000 if p != x1.Args[1] { 33001 break 33002 } 33003 if mem != 
x1.Args[2] { 33004 break 33005 } 33006 y := or.Args[1] 33007 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33008 break 33009 } 33010 b = mergePoint(b, x0, x1) 33011 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33012 v.reset(OpCopy) 33013 v.AddArg(v0) 33014 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33015 v1.AuxInt = j1 33016 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33017 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33018 v3.AuxInt = i0 33019 v3.Aux = s 33020 v3.AddArg(p) 33021 v3.AddArg(idx) 33022 v3.AddArg(mem) 33023 v2.AddArg(v3) 33024 v1.AddArg(v2) 33025 v0.AddArg(v1) 33026 v0.AddArg(y) 33027 return true 33028 } 33029 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 33030 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33031 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33032 for { 33033 _ = v.Args[1] 33034 s0 := v.Args[0] 33035 if s0.Op != OpAMD64SHLQconst { 33036 break 33037 } 33038 j0 := s0.AuxInt 33039 r0 := s0.Args[0] 33040 if r0.Op != OpAMD64ROLWconst { 33041 break 33042 } 33043 if r0.AuxInt != 8 { 33044 break 33045 } 33046 x0 := r0.Args[0] 33047 if x0.Op != OpAMD64MOVWloadidx1 { 33048 break 33049 } 33050 i0 := x0.AuxInt 33051 s := x0.Aux 33052 _ = x0.Args[2] 33053 p := x0.Args[0] 33054 idx := x0.Args[1] 33055 mem := x0.Args[2] 33056 or := v.Args[1] 33057 if or.Op != OpAMD64ORQ { 33058 break 33059 } 33060 _ = or.Args[1] 33061 y := or.Args[0] 33062 s1 := or.Args[1] 33063 if s1.Op != OpAMD64SHLQconst { 33064 break 33065 } 33066 j1 := s1.AuxInt 33067 r1 := s1.Args[0] 33068 if r1.Op != OpAMD64ROLWconst { 33069 break 33070 } 33071 if r1.AuxInt != 8 { 33072 break 33073 } 33074 x1 := r1.Args[0] 33075 if x1.Op != OpAMD64MOVWloadidx1 { 33076 break 33077 } 33078 i1 := x1.AuxInt 33079 if x1.Aux != s { 33080 break 33081 } 33082 _ = x1.Args[2] 33083 if p != x1.Args[0] { 33084 break 33085 } 33086 if idx != x1.Args[1] { 33087 break 33088 } 33089 if mem != x1.Args[2] { 33090 break 33091 } 33092 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33093 break 33094 } 33095 b = mergePoint(b, x0, x1) 33096 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33097 v.reset(OpCopy) 33098 v.AddArg(v0) 33099 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33100 v1.AuxInt = j1 33101 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33102 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33103 v3.AuxInt = i0 33104 v3.Aux = s 33105 v3.AddArg(p) 33106 v3.AddArg(idx) 33107 v3.AddArg(mem) 33108 v2.AddArg(v3) 33109 v1.AddArg(v2) 33110 v0.AddArg(v1) 33111 v0.AddArg(y) 33112 return true 33113 } 33114 return false 33115 } 33116 func 
rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool { 33117 b := v.Block 33118 _ = b 33119 typ := &b.Func.Config.Types 33120 _ = typ 33121 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) 33122 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33123 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33124 for { 33125 _ = v.Args[1] 33126 s0 := v.Args[0] 33127 if s0.Op != OpAMD64SHLQconst { 33128 break 33129 } 33130 j0 := s0.AuxInt 33131 r0 := s0.Args[0] 33132 if r0.Op != OpAMD64ROLWconst { 33133 break 33134 } 33135 if r0.AuxInt != 8 { 33136 break 33137 } 33138 x0 := r0.Args[0] 33139 if x0.Op != OpAMD64MOVWloadidx1 { 33140 break 33141 } 33142 i0 := x0.AuxInt 33143 s := x0.Aux 33144 _ = x0.Args[2] 33145 idx := x0.Args[0] 33146 p := x0.Args[1] 33147 mem := x0.Args[2] 33148 or := v.Args[1] 33149 if or.Op != OpAMD64ORQ { 33150 break 33151 } 33152 _ = or.Args[1] 33153 y := or.Args[0] 33154 s1 := or.Args[1] 33155 if s1.Op != OpAMD64SHLQconst { 33156 break 33157 } 33158 j1 := s1.AuxInt 33159 r1 := s1.Args[0] 33160 if r1.Op != OpAMD64ROLWconst { 33161 break 33162 } 33163 if r1.AuxInt != 8 { 33164 break 33165 } 33166 x1 := r1.Args[0] 33167 if x1.Op != OpAMD64MOVWloadidx1 { 33168 break 33169 } 33170 i1 := x1.AuxInt 33171 if x1.Aux != s { 33172 break 33173 } 33174 _ = x1.Args[2] 33175 if p != x1.Args[0] { 33176 break 33177 } 33178 if idx != x1.Args[1] { 33179 break 33180 } 33181 if mem != x1.Args[2] { 33182 break 33183 } 33184 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33185 break 33186 } 33187 b = mergePoint(b, x0, x1) 33188 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33189 v.reset(OpCopy) 33190 v.AddArg(v0) 33191 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33192 v1.AuxInt = j1 33193 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33194 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33195 v3.AuxInt = i0 33196 v3.Aux = s 33197 v3.AddArg(p) 33198 v3.AddArg(idx) 33199 v3.AddArg(mem) 33200 v2.AddArg(v3) 33201 v1.AddArg(v2) 33202 v0.AddArg(v1) 33203 v0.AddArg(y) 33204 return true 33205 } 33206 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 33207 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33208 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33209 for { 33210 _ = v.Args[1] 33211 s0 := v.Args[0] 33212 if s0.Op != OpAMD64SHLQconst { 33213 break 33214 } 33215 j0 := s0.AuxInt 33216 r0 := s0.Args[0] 33217 if r0.Op != OpAMD64ROLWconst 
{ 33218 break 33219 } 33220 if r0.AuxInt != 8 { 33221 break 33222 } 33223 x0 := r0.Args[0] 33224 if x0.Op != OpAMD64MOVWloadidx1 { 33225 break 33226 } 33227 i0 := x0.AuxInt 33228 s := x0.Aux 33229 _ = x0.Args[2] 33230 p := x0.Args[0] 33231 idx := x0.Args[1] 33232 mem := x0.Args[2] 33233 or := v.Args[1] 33234 if or.Op != OpAMD64ORQ { 33235 break 33236 } 33237 _ = or.Args[1] 33238 y := or.Args[0] 33239 s1 := or.Args[1] 33240 if s1.Op != OpAMD64SHLQconst { 33241 break 33242 } 33243 j1 := s1.AuxInt 33244 r1 := s1.Args[0] 33245 if r1.Op != OpAMD64ROLWconst { 33246 break 33247 } 33248 if r1.AuxInt != 8 { 33249 break 33250 } 33251 x1 := r1.Args[0] 33252 if x1.Op != OpAMD64MOVWloadidx1 { 33253 break 33254 } 33255 i1 := x1.AuxInt 33256 if x1.Aux != s { 33257 break 33258 } 33259 _ = x1.Args[2] 33260 if idx != x1.Args[0] { 33261 break 33262 } 33263 if p != x1.Args[1] { 33264 break 33265 } 33266 if mem != x1.Args[2] { 33267 break 33268 } 33269 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33270 break 33271 } 33272 b = mergePoint(b, x0, x1) 33273 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33274 v.reset(OpCopy) 33275 v.AddArg(v0) 33276 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33277 v1.AuxInt = j1 33278 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33279 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33280 v3.AuxInt = i0 33281 v3.Aux = s 33282 v3.AddArg(p) 33283 v3.AddArg(idx) 33284 v3.AddArg(mem) 33285 v2.AddArg(v3) 33286 v1.AddArg(v2) 33287 v0.AddArg(v1) 33288 v0.AddArg(y) 33289 return true 33290 } 33291 // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) 33292 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33293 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33294 for { 33295 _ = v.Args[1] 33296 s0 := v.Args[0] 33297 if s0.Op != OpAMD64SHLQconst { 33298 break 33299 } 33300 j0 := s0.AuxInt 33301 r0 := s0.Args[0] 33302 if r0.Op != OpAMD64ROLWconst { 33303 break 33304 } 33305 if r0.AuxInt != 8 { 33306 break 33307 } 33308 x0 := r0.Args[0] 33309 if x0.Op != OpAMD64MOVWloadidx1 { 33310 break 33311 } 33312 i0 := x0.AuxInt 33313 s := x0.Aux 33314 _ = x0.Args[2] 33315 idx := x0.Args[0] 33316 p := x0.Args[1] 33317 mem := x0.Args[2] 33318 or := v.Args[1] 33319 if or.Op != OpAMD64ORQ { 33320 break 33321 } 33322 _ = or.Args[1] 33323 y := or.Args[0] 33324 s1 := or.Args[1] 33325 if s1.Op != OpAMD64SHLQconst { 33326 break 33327 } 33328 j1 := s1.AuxInt 33329 r1 := s1.Args[0] 33330 if r1.Op != OpAMD64ROLWconst { 33331 break 33332 } 33333 if r1.AuxInt != 8 { 33334 break 33335 } 33336 x1 := r1.Args[0] 33337 if x1.Op != OpAMD64MOVWloadidx1 { 33338 break 33339 } 33340 i1 := x1.AuxInt 33341 if x1.Aux != s { 33342 break 33343 } 33344 _ = x1.Args[2] 33345 if idx != x1.Args[0] { 33346 break 33347 } 33348 if p != x1.Args[1] { 33349 break 33350 } 33351 if mem != x1.Args[2] { 33352 break 33353 } 33354 
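// Aside on the cond plumbing shared by these matches: the x.Uses == 1
// tests ensure each matched sub-value has no consumer outside the
// pattern, so it is safe to delete; clobber(x) then marks the value
// dead and always reports true, which lets it sit inside the && chain;
// and mergePoint(b, x0, x1) picks a block where the combined load can
// be placed with both original loads available, or nil if there is
// none. End to end, the chains recognized here amount to a 32-bit
// big-endian load; a sketch, illustrative only (load32BE is a
// hypothetical name, not part of this file):
//
//	func load32BE(b []byte, i int) uint32 {
//		// four adjacent bytes assembled big-endian; the rules reduce
//		// this shape to MOVLloadidx1 followed by BSWAPL
//		return uint32(b[i])<<24 | uint32(b[i+1])<<16 |
//			uint32(b[i+2])<<8 | uint32(b[i+3])
//	}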
if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33355 break 33356 } 33357 b = mergePoint(b, x0, x1) 33358 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33359 v.reset(OpCopy) 33360 v.AddArg(v0) 33361 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33362 v1.AuxInt = j1 33363 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33364 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33365 v3.AuxInt = i0 33366 v3.Aux = s 33367 v3.AddArg(p) 33368 v3.AddArg(idx) 33369 v3.AddArg(mem) 33370 v2.AddArg(v3) 33371 v1.AddArg(v2) 33372 v0.AddArg(v1) 33373 v0.AddArg(y) 33374 return true 33375 } 33376 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33377 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33378 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33379 for { 33380 _ = v.Args[1] 33381 or := v.Args[0] 33382 if or.Op != OpAMD64ORQ { 33383 break 33384 } 33385 _ = or.Args[1] 33386 s1 := or.Args[0] 33387 if s1.Op != OpAMD64SHLQconst { 33388 break 33389 } 33390 j1 := s1.AuxInt 33391 r1 := s1.Args[0] 33392 if r1.Op != OpAMD64ROLWconst { 33393 break 33394 } 33395 if r1.AuxInt != 8 { 33396 break 33397 } 33398 x1 := r1.Args[0] 33399 if x1.Op != OpAMD64MOVWloadidx1 { 33400 break 33401 } 33402 i1 := x1.AuxInt 33403 s := x1.Aux 33404 _ = x1.Args[2] 33405 p := x1.Args[0] 33406 idx := x1.Args[1] 33407 mem := x1.Args[2] 33408 y := or.Args[1] 33409 s0 := v.Args[1] 33410 if s0.Op != OpAMD64SHLQconst { 33411 break 33412 } 33413 j0 := s0.AuxInt 33414 r0 := s0.Args[0] 33415 if r0.Op != OpAMD64ROLWconst { 33416 break 33417 } 33418 if r0.AuxInt != 8 { 33419 break 33420 } 33421 x0 := r0.Args[0] 33422 if x0.Op != OpAMD64MOVWloadidx1 { 33423 break 33424 } 33425 i0 := x0.AuxInt 33426 if x0.Aux != s { 33427 break 33428 } 33429 _ = x0.Args[2] 33430 if p != x0.Args[0] { 33431 break 33432 } 33433 if idx != x0.Args[1] { 33434 break 33435 } 33436 if mem != x0.Args[2] { 33437 break 33438 } 33439 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33440 break 33441 } 33442 b = mergePoint(b, x0, x1) 33443 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33444 v.reset(OpCopy) 33445 v.AddArg(v0) 33446 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33447 v1.AuxInt = j1 33448 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33449 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33450 v3.AuxInt = i0 33451 v3.Aux = s 33452 v3.AddArg(p) 33453 v3.AddArg(idx) 33454 v3.AddArg(mem) 33455 v2.AddArg(v3) 33456 v1.AddArg(v2) 33457 v0.AddArg(v1) 33458 v0.AddArg(y) 33459 return true 33460 } 33461 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p 
mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33462 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33463 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33464 for { 33465 _ = v.Args[1] 33466 or := v.Args[0] 33467 if or.Op != OpAMD64ORQ { 33468 break 33469 } 33470 _ = or.Args[1] 33471 s1 := or.Args[0] 33472 if s1.Op != OpAMD64SHLQconst { 33473 break 33474 } 33475 j1 := s1.AuxInt 33476 r1 := s1.Args[0] 33477 if r1.Op != OpAMD64ROLWconst { 33478 break 33479 } 33480 if r1.AuxInt != 8 { 33481 break 33482 } 33483 x1 := r1.Args[0] 33484 if x1.Op != OpAMD64MOVWloadidx1 { 33485 break 33486 } 33487 i1 := x1.AuxInt 33488 s := x1.Aux 33489 _ = x1.Args[2] 33490 idx := x1.Args[0] 33491 p := x1.Args[1] 33492 mem := x1.Args[2] 33493 y := or.Args[1] 33494 s0 := v.Args[1] 33495 if s0.Op != OpAMD64SHLQconst { 33496 break 33497 } 33498 j0 := s0.AuxInt 33499 r0 := s0.Args[0] 33500 if r0.Op != OpAMD64ROLWconst { 33501 break 33502 } 33503 if r0.AuxInt != 8 { 33504 break 33505 } 33506 x0 := r0.Args[0] 33507 if x0.Op != OpAMD64MOVWloadidx1 { 33508 break 33509 } 33510 i0 := x0.AuxInt 33511 if x0.Aux != s { 33512 break 33513 } 33514 _ = x0.Args[2] 33515 if p != x0.Args[0] { 33516 break 33517 } 33518 if idx != x0.Args[1] { 33519 break 33520 } 33521 if mem != x0.Args[2] { 33522 break 33523 } 33524 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33525 break 33526 } 33527 b = mergePoint(b, x0, x1) 33528 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33529 v.reset(OpCopy) 33530 v.AddArg(v0) 33531 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33532 v1.AuxInt = j1 33533 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33534 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33535 v3.AuxInt = i0 33536 v3.Aux = s 33537 v3.AddArg(p) 33538 v3.AddArg(idx) 33539 v3.AddArg(mem) 33540 v2.AddArg(v3) 33541 v1.AddArg(v2) 33542 v0.AddArg(v1) 33543 v0.AddArg(y) 33544 return true 33545 } 33546 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33547 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33548 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33549 for { 33550 _ = v.Args[1] 33551 or := v.Args[0] 33552 if or.Op != OpAMD64ORQ { 33553 break 33554 } 33555 _ = or.Args[1] 33556 y := or.Args[0] 33557 s1 := or.Args[1] 33558 if s1.Op != OpAMD64SHLQconst { 33559 break 33560 } 33561 j1 := s1.AuxInt 33562 r1 := s1.Args[0] 33563 if r1.Op != OpAMD64ROLWconst { 33564 break 33565 } 33566 if r1.AuxInt != 8 { 33567 break 33568 } 33569 x1 := r1.Args[0] 33570 if x1.Op != 
OpAMD64MOVWloadidx1 { 33571 break 33572 } 33573 i1 := x1.AuxInt 33574 s := x1.Aux 33575 _ = x1.Args[2] 33576 p := x1.Args[0] 33577 idx := x1.Args[1] 33578 mem := x1.Args[2] 33579 s0 := v.Args[1] 33580 if s0.Op != OpAMD64SHLQconst { 33581 break 33582 } 33583 j0 := s0.AuxInt 33584 r0 := s0.Args[0] 33585 if r0.Op != OpAMD64ROLWconst { 33586 break 33587 } 33588 if r0.AuxInt != 8 { 33589 break 33590 } 33591 x0 := r0.Args[0] 33592 if x0.Op != OpAMD64MOVWloadidx1 { 33593 break 33594 } 33595 i0 := x0.AuxInt 33596 if x0.Aux != s { 33597 break 33598 } 33599 _ = x0.Args[2] 33600 if p != x0.Args[0] { 33601 break 33602 } 33603 if idx != x0.Args[1] { 33604 break 33605 } 33606 if mem != x0.Args[2] { 33607 break 33608 } 33609 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33610 break 33611 } 33612 b = mergePoint(b, x0, x1) 33613 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33614 v.reset(OpCopy) 33615 v.AddArg(v0) 33616 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33617 v1.AuxInt = j1 33618 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33619 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33620 v3.AuxInt = i0 33621 v3.Aux = s 33622 v3.AddArg(p) 33623 v3.AddArg(idx) 33624 v3.AddArg(mem) 33625 v2.AddArg(v3) 33626 v1.AddArg(v2) 33627 v0.AddArg(v1) 33628 v0.AddArg(y) 33629 return true 33630 } 33631 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) 33632 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33633 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33634 for { 33635 _ = v.Args[1] 33636 or := v.Args[0] 33637 if or.Op != OpAMD64ORQ { 33638 break 33639 } 33640 _ = or.Args[1] 33641 y := or.Args[0] 33642 s1 := or.Args[1] 33643 if s1.Op != OpAMD64SHLQconst { 33644 break 33645 } 33646 j1 := s1.AuxInt 33647 r1 := s1.Args[0] 33648 if r1.Op != OpAMD64ROLWconst { 33649 break 33650 } 33651 if r1.AuxInt != 8 { 33652 break 33653 } 33654 x1 := r1.Args[0] 33655 if x1.Op != OpAMD64MOVWloadidx1 { 33656 break 33657 } 33658 i1 := x1.AuxInt 33659 s := x1.Aux 33660 _ = x1.Args[2] 33661 idx := x1.Args[0] 33662 p := x1.Args[1] 33663 mem := x1.Args[2] 33664 s0 := v.Args[1] 33665 if s0.Op != OpAMD64SHLQconst { 33666 break 33667 } 33668 j0 := s0.AuxInt 33669 r0 := s0.Args[0] 33670 if r0.Op != OpAMD64ROLWconst { 33671 break 33672 } 33673 if r0.AuxInt != 8 { 33674 break 33675 } 33676 x0 := r0.Args[0] 33677 if x0.Op != OpAMD64MOVWloadidx1 { 33678 break 33679 } 33680 i0 := x0.AuxInt 33681 if x0.Aux != s { 33682 break 33683 } 33684 _ = x0.Args[2] 33685 if p != x0.Args[0] { 33686 break 33687 } 33688 if idx != x0.Args[1] { 33689 break 33690 } 33691 if mem != x0.Args[2] { 33692 break 33693 } 33694 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33695 break 33696 } 33697 b = mergePoint(b, x0, x1) 33698 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33699 v.reset(OpCopy) 33700 v.AddArg(v0) 33701 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33702 v1.AuxInt = j1 33703 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33704 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33705 v3.AuxInt = i0 33706 v3.Aux = s 33707 v3.AddArg(p) 33708 v3.AddArg(idx) 33709 v3.AddArg(mem) 33710 v2.AddArg(v3) 33711 v1.AddArg(v2) 33712 v0.AddArg(v1) 33713 v0.AddArg(y) 33714 return true 33715 } 33716 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33717 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33718 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33719 for { 33720 _ = v.Args[1] 33721 or := v.Args[0] 33722 if or.Op != OpAMD64ORQ { 33723 break 33724 } 33725 _ = or.Args[1] 33726 s1 := or.Args[0] 33727 if s1.Op != OpAMD64SHLQconst { 33728 break 33729 } 33730 j1 := s1.AuxInt 33731 r1 := s1.Args[0] 33732 if r1.Op != OpAMD64ROLWconst { 33733 break 33734 } 33735 if r1.AuxInt != 8 { 33736 break 33737 } 33738 x1 := r1.Args[0] 33739 if x1.Op != OpAMD64MOVWloadidx1 { 33740 break 33741 } 33742 i1 := x1.AuxInt 33743 s := x1.Aux 33744 _ = x1.Args[2] 33745 p := x1.Args[0] 33746 idx := x1.Args[1] 33747 mem := x1.Args[2] 33748 y := or.Args[1] 33749 s0 := v.Args[1] 33750 if s0.Op != OpAMD64SHLQconst { 33751 break 33752 } 33753 j0 := s0.AuxInt 33754 r0 := s0.Args[0] 33755 if r0.Op != OpAMD64ROLWconst { 33756 break 33757 } 33758 if r0.AuxInt != 8 { 33759 break 33760 } 33761 x0 := r0.Args[0] 33762 if x0.Op != OpAMD64MOVWloadidx1 { 33763 break 33764 } 33765 i0 := x0.AuxInt 33766 if x0.Aux != s { 33767 break 33768 } 33769 _ = x0.Args[2] 33770 if idx != x0.Args[0] { 33771 break 33772 } 33773 if p != x0.Args[1] { 33774 break 33775 } 33776 if mem != x0.Args[2] { 33777 break 33778 } 33779 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33780 break 33781 } 33782 b = mergePoint(b, x0, x1) 33783 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33784 v.reset(OpCopy) 33785 v.AddArg(v0) 33786 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33787 v1.AuxInt = j1 33788 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33789 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33790 v3.AuxInt = i0 33791 v3.Aux = s 33792 v3.AddArg(p) 33793 v3.AddArg(idx) 33794 v3.AddArg(mem) 33795 v2.AddArg(v3) 33796 v1.AddArg(v2) 33797 v0.AddArg(v1) 33798 v0.AddArg(y) 33799 return true 33800 } 33801 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33802 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 
&& s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33803 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33804 for { 33805 _ = v.Args[1] 33806 or := v.Args[0] 33807 if or.Op != OpAMD64ORQ { 33808 break 33809 } 33810 _ = or.Args[1] 33811 s1 := or.Args[0] 33812 if s1.Op != OpAMD64SHLQconst { 33813 break 33814 } 33815 j1 := s1.AuxInt 33816 r1 := s1.Args[0] 33817 if r1.Op != OpAMD64ROLWconst { 33818 break 33819 } 33820 if r1.AuxInt != 8 { 33821 break 33822 } 33823 x1 := r1.Args[0] 33824 if x1.Op != OpAMD64MOVWloadidx1 { 33825 break 33826 } 33827 i1 := x1.AuxInt 33828 s := x1.Aux 33829 _ = x1.Args[2] 33830 idx := x1.Args[0] 33831 p := x1.Args[1] 33832 mem := x1.Args[2] 33833 y := or.Args[1] 33834 s0 := v.Args[1] 33835 if s0.Op != OpAMD64SHLQconst { 33836 break 33837 } 33838 j0 := s0.AuxInt 33839 r0 := s0.Args[0] 33840 if r0.Op != OpAMD64ROLWconst { 33841 break 33842 } 33843 if r0.AuxInt != 8 { 33844 break 33845 } 33846 x0 := r0.Args[0] 33847 if x0.Op != OpAMD64MOVWloadidx1 { 33848 break 33849 } 33850 i0 := x0.AuxInt 33851 if x0.Aux != s { 33852 break 33853 } 33854 _ = x0.Args[2] 33855 if idx != x0.Args[0] { 33856 break 33857 } 33858 if p != x0.Args[1] { 33859 break 33860 } 33861 if mem != x0.Args[2] { 33862 break 33863 } 33864 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33865 break 33866 } 33867 b = mergePoint(b, x0, x1) 33868 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33869 v.reset(OpCopy) 33870 v.AddArg(v0) 33871 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33872 v1.AuxInt = j1 33873 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33874 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33875 v3.AuxInt = i0 33876 v3.Aux = s 33877 v3.AddArg(p) 33878 v3.AddArg(idx) 33879 v3.AddArg(mem) 33880 v2.AddArg(v3) 33881 v1.AddArg(v2) 33882 v0.AddArg(v1) 33883 v0.AddArg(y) 33884 return true 33885 } 33886 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33887 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33888 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33889 for { 33890 _ = v.Args[1] 33891 or := v.Args[0] 33892 if or.Op != OpAMD64ORQ { 33893 break 33894 } 33895 _ = or.Args[1] 33896 y := or.Args[0] 33897 s1 := or.Args[1] 33898 if s1.Op != OpAMD64SHLQconst { 33899 break 33900 } 33901 j1 := s1.AuxInt 33902 r1 := s1.Args[0] 33903 if r1.Op != OpAMD64ROLWconst { 33904 break 33905 } 33906 if r1.AuxInt != 8 { 33907 break 33908 } 33909 x1 := r1.Args[0] 33910 if x1.Op != OpAMD64MOVWloadidx1 { 33911 break 33912 } 33913 i1 := x1.AuxInt 33914 s := x1.Aux 33915 _ = x1.Args[2] 33916 p := x1.Args[0] 33917 idx := x1.Args[1] 33918 mem := x1.Args[2] 33919 s0 := v.Args[1] 33920 if s0.Op != 
OpAMD64SHLQconst { 33921 break 33922 } 33923 j0 := s0.AuxInt 33924 r0 := s0.Args[0] 33925 if r0.Op != OpAMD64ROLWconst { 33926 break 33927 } 33928 if r0.AuxInt != 8 { 33929 break 33930 } 33931 x0 := r0.Args[0] 33932 if x0.Op != OpAMD64MOVWloadidx1 { 33933 break 33934 } 33935 i0 := x0.AuxInt 33936 if x0.Aux != s { 33937 break 33938 } 33939 _ = x0.Args[2] 33940 if idx != x0.Args[0] { 33941 break 33942 } 33943 if p != x0.Args[1] { 33944 break 33945 } 33946 if mem != x0.Args[2] { 33947 break 33948 } 33949 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { 33950 break 33951 } 33952 b = mergePoint(b, x0, x1) 33953 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 33954 v.reset(OpCopy) 33955 v.AddArg(v0) 33956 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 33957 v1.AuxInt = j1 33958 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 33959 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 33960 v3.AuxInt = i0 33961 v3.Aux = s 33962 v3.AddArg(p) 33963 v3.AddArg(idx) 33964 v3.AddArg(mem) 33965 v2.AddArg(v3) 33966 v1.AddArg(v2) 33967 v0.AddArg(v1) 33968 v0.AddArg(y) 33969 return true 33970 } 33971 return false 33972 } 33973 func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { 33974 b := v.Block 33975 _ = b 33976 typ := &b.Func.Config.Types 33977 _ = typ 33978 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) 33979 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) 33980 // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y) 33981 for { 33982 _ = v.Args[1] 33983 or := v.Args[0] 33984 if or.Op != OpAMD64ORQ { 33985 break 33986 } 33987 _ = or.Args[1] 33988 y := or.Args[0] 33989 s1 := or.Args[1] 33990 if s1.Op != OpAMD64SHLQconst { 33991 break 33992 } 33993 j1 := s1.AuxInt 33994 r1 := s1.Args[0] 33995 if r1.Op != OpAMD64ROLWconst { 33996 break 33997 } 33998 if r1.AuxInt != 8 { 33999 break 34000 } 34001 x1 := r1.Args[0] 34002 if x1.Op != OpAMD64MOVWloadidx1 { 34003 break 34004 } 34005 i1 := x1.AuxInt 34006 s := x1.Aux 34007 _ = x1.Args[2] 34008 idx := x1.Args[0] 34009 p := x1.Args[1] 34010 mem := x1.Args[2] 34011 s0 := v.Args[1] 34012 if s0.Op != OpAMD64SHLQconst { 34013 break 34014 } 34015 j0 := s0.AuxInt 34016 r0 := s0.Args[0] 34017 if r0.Op != OpAMD64ROLWconst { 34018 break 34019 } 34020 if r0.AuxInt != 8 { 34021 break 34022 } 34023 x0 := r0.Args[0] 34024 if x0.Op != OpAMD64MOVWloadidx1 { 34025 break 34026 } 34027 i0 := x0.AuxInt 34028 if x0.Aux != s { 34029 break 34030 } 34031 _ = x0.Args[2] 34032 if idx != x0.Args[0] { 34033 break 34034 } 34035 if p != x0.Args[1] { 34036 break 34037 } 34038 if mem != x0.Args[2] { 34039 break 34040 } 34041 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && 
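// (The final ORQ rules, after these byte-swap merges, fold a
// single-use MOVQload operand directly into the arithmetic as ORQmem;
// canMergeLoad guards that rewrite, roughly checking that the load can
// be evaluated at the ORQ without crossing a conflicting memory
// operation.)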
clobber(s1) && clobber(or)) { 34042 break 34043 } 34044 b = mergePoint(b, x0, x1) 34045 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) 34046 v.reset(OpCopy) 34047 v.AddArg(v0) 34048 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) 34049 v1.AuxInt = j1 34050 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32) 34051 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32) 34052 v3.AuxInt = i0 34053 v3.Aux = s 34054 v3.AddArg(p) 34055 v3.AddArg(idx) 34056 v3.AddArg(mem) 34057 v2.AddArg(v3) 34058 v1.AddArg(v2) 34059 v0.AddArg(v1) 34060 v0.AddArg(y) 34061 return true 34062 } 34063 // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) 34064 // cond: canMergeLoad(v, l, x) && clobber(l) 34065 // result: (ORQmem x [off] {sym} ptr mem) 34066 for { 34067 _ = v.Args[1] 34068 x := v.Args[0] 34069 l := v.Args[1] 34070 if l.Op != OpAMD64MOVQload { 34071 break 34072 } 34073 off := l.AuxInt 34074 sym := l.Aux 34075 _ = l.Args[1] 34076 ptr := l.Args[0] 34077 mem := l.Args[1] 34078 if !(canMergeLoad(v, l, x) && clobber(l)) { 34079 break 34080 } 34081 v.reset(OpAMD64ORQmem) 34082 v.AuxInt = off 34083 v.Aux = sym 34084 v.AddArg(x) 34085 v.AddArg(ptr) 34086 v.AddArg(mem) 34087 return true 34088 } 34089 // match: (ORQ l:(MOVQload [off] {sym} ptr mem) x) 34090 // cond: canMergeLoad(v, l, x) && clobber(l) 34091 // result: (ORQmem x [off] {sym} ptr mem) 34092 for { 34093 _ = v.Args[1] 34094 l := v.Args[0] 34095 if l.Op != OpAMD64MOVQload { 34096 break 34097 } 34098 off := l.AuxInt 34099 sym := l.Aux 34100 _ = l.Args[1] 34101 ptr := l.Args[0] 34102 mem := l.Args[1] 34103 x := v.Args[1] 34104 if !(canMergeLoad(v, l, x) && clobber(l)) { 34105 break 34106 } 34107 v.reset(OpAMD64ORQmem) 34108 v.AuxInt = off 34109 v.Aux = sym 34110 v.AddArg(x) 34111 v.AddArg(ptr) 34112 v.AddArg(mem) 34113 return true 34114 } 34115 return false 34116 } 34117 func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool { 34118 // match: (ORQconst [0] x) 34119 // cond: 34120 // result: x 34121 for { 34122 if v.AuxInt != 0 { 34123 break 34124 } 34125 x := v.Args[0] 34126 v.reset(OpCopy) 34127 v.Type = x.Type 34128 v.AddArg(x) 34129 return true 34130 } 34131 // match: (ORQconst [-1] _) 34132 // cond: 34133 // result: (MOVQconst [-1]) 34134 for { 34135 if v.AuxInt != -1 { 34136 break 34137 } 34138 v.reset(OpAMD64MOVQconst) 34139 v.AuxInt = -1 34140 return true 34141 } 34142 // match: (ORQconst [c] (MOVQconst [d])) 34143 // cond: 34144 // result: (MOVQconst [c|d]) 34145 for { 34146 c := v.AuxInt 34147 v_0 := v.Args[0] 34148 if v_0.Op != OpAMD64MOVQconst { 34149 break 34150 } 34151 d := v_0.AuxInt 34152 v.reset(OpAMD64MOVQconst) 34153 v.AuxInt = c | d 34154 return true 34155 } 34156 return false 34157 } 34158 func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool { 34159 b := v.Block 34160 _ = b 34161 typ := &b.Func.Config.Types 34162 _ = typ 34163 // match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 34164 // cond: 34165 // result: ( ORQ x (MOVQf2i y)) 34166 for { 34167 off := v.AuxInt 34168 sym := v.Aux 34169 _ = v.Args[2] 34170 x := v.Args[0] 34171 ptr := v.Args[1] 34172 v_2 := v.Args[2] 34173 if v_2.Op != OpAMD64MOVSDstore { 34174 break 34175 } 34176 if v_2.AuxInt != off { 34177 break 34178 } 34179 if v_2.Aux != sym { 34180 break 34181 } 34182 _ = v_2.Args[2] 34183 if ptr != v_2.Args[0] { 34184 break 34185 } 34186 y := v_2.Args[1] 34187 v.reset(OpAMD64ORQ) 34188 v.AddArg(x) 34189 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 34190 v0.AddArg(y) 34191 v.AddArg(v0) 34192 return true 34193 } 34194 return false 34195 } 34196 func 
rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool { 34197 // match: (ROLB x (NEGQ y)) 34198 // cond: 34199 // result: (RORB x y) 34200 for { 34201 _ = v.Args[1] 34202 x := v.Args[0] 34203 v_1 := v.Args[1] 34204 if v_1.Op != OpAMD64NEGQ { 34205 break 34206 } 34207 y := v_1.Args[0] 34208 v.reset(OpAMD64RORB) 34209 v.AddArg(x) 34210 v.AddArg(y) 34211 return true 34212 } 34213 // match: (ROLB x (NEGL y)) 34214 // cond: 34215 // result: (RORB x y) 34216 for { 34217 _ = v.Args[1] 34218 x := v.Args[0] 34219 v_1 := v.Args[1] 34220 if v_1.Op != OpAMD64NEGL { 34221 break 34222 } 34223 y := v_1.Args[0] 34224 v.reset(OpAMD64RORB) 34225 v.AddArg(x) 34226 v.AddArg(y) 34227 return true 34228 } 34229 // match: (ROLB x (MOVQconst [c])) 34230 // cond: 34231 // result: (ROLBconst [c&7 ] x) 34232 for { 34233 _ = v.Args[1] 34234 x := v.Args[0] 34235 v_1 := v.Args[1] 34236 if v_1.Op != OpAMD64MOVQconst { 34237 break 34238 } 34239 c := v_1.AuxInt 34240 v.reset(OpAMD64ROLBconst) 34241 v.AuxInt = c & 7 34242 v.AddArg(x) 34243 return true 34244 } 34245 // match: (ROLB x (MOVLconst [c])) 34246 // cond: 34247 // result: (ROLBconst [c&7 ] x) 34248 for { 34249 _ = v.Args[1] 34250 x := v.Args[0] 34251 v_1 := v.Args[1] 34252 if v_1.Op != OpAMD64MOVLconst { 34253 break 34254 } 34255 c := v_1.AuxInt 34256 v.reset(OpAMD64ROLBconst) 34257 v.AuxInt = c & 7 34258 v.AddArg(x) 34259 return true 34260 } 34261 return false 34262 } 34263 func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool { 34264 // match: (ROLBconst [c] (ROLBconst [d] x)) 34265 // cond: 34266 // result: (ROLBconst [(c+d)& 7] x) 34267 for { 34268 c := v.AuxInt 34269 v_0 := v.Args[0] 34270 if v_0.Op != OpAMD64ROLBconst { 34271 break 34272 } 34273 d := v_0.AuxInt 34274 x := v_0.Args[0] 34275 v.reset(OpAMD64ROLBconst) 34276 v.AuxInt = (c + d) & 7 34277 v.AddArg(x) 34278 return true 34279 } 34280 // match: (ROLBconst x [0]) 34281 // cond: 34282 // result: x 34283 for { 34284 if v.AuxInt != 0 { 34285 break 34286 } 34287 x := v.Args[0] 34288 v.reset(OpCopy) 34289 v.Type = x.Type 34290 v.AddArg(x) 34291 return true 34292 } 34293 return false 34294 } 34295 func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool { 34296 // match: (ROLL x (NEGQ y)) 34297 // cond: 34298 // result: (RORL x y) 34299 for { 34300 _ = v.Args[1] 34301 x := v.Args[0] 34302 v_1 := v.Args[1] 34303 if v_1.Op != OpAMD64NEGQ { 34304 break 34305 } 34306 y := v_1.Args[0] 34307 v.reset(OpAMD64RORL) 34308 v.AddArg(x) 34309 v.AddArg(y) 34310 return true 34311 } 34312 // match: (ROLL x (NEGL y)) 34313 // cond: 34314 // result: (RORL x y) 34315 for { 34316 _ = v.Args[1] 34317 x := v.Args[0] 34318 v_1 := v.Args[1] 34319 if v_1.Op != OpAMD64NEGL { 34320 break 34321 } 34322 y := v_1.Args[0] 34323 v.reset(OpAMD64RORL) 34324 v.AddArg(x) 34325 v.AddArg(y) 34326 return true 34327 } 34328 // match: (ROLL x (MOVQconst [c])) 34329 // cond: 34330 // result: (ROLLconst [c&31] x) 34331 for { 34332 _ = v.Args[1] 34333 x := v.Args[0] 34334 v_1 := v.Args[1] 34335 if v_1.Op != OpAMD64MOVQconst { 34336 break 34337 } 34338 c := v_1.AuxInt 34339 v.reset(OpAMD64ROLLconst) 34340 v.AuxInt = c & 31 34341 v.AddArg(x) 34342 return true 34343 } 34344 // match: (ROLL x (MOVLconst [c])) 34345 // cond: 34346 // result: (ROLLconst [c&31] x) 34347 for { 34348 _ = v.Args[1] 34349 x := v.Args[0] 34350 v_1 := v.Args[1] 34351 if v_1.Op != OpAMD64MOVLconst { 34352 break 34353 } 34354 c := v_1.AuxInt 34355 v.reset(OpAMD64ROLLconst) 34356 v.AuxInt = c & 31 34357 v.AddArg(x) 34358 return true 34359 } 34360 return false 34361 } 34362 func 
rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool { 34363 // match: (ROLLconst [c] (ROLLconst [d] x)) 34364 // cond: 34365 // result: (ROLLconst [(c+d)&31] x) 34366 for { 34367 c := v.AuxInt 34368 v_0 := v.Args[0] 34369 if v_0.Op != OpAMD64ROLLconst { 34370 break 34371 } 34372 d := v_0.AuxInt 34373 x := v_0.Args[0] 34374 v.reset(OpAMD64ROLLconst) 34375 v.AuxInt = (c + d) & 31 34376 v.AddArg(x) 34377 return true 34378 } 34379 // match: (ROLLconst x [0]) 34380 // cond: 34381 // result: x 34382 for { 34383 if v.AuxInt != 0 { 34384 break 34385 } 34386 x := v.Args[0] 34387 v.reset(OpCopy) 34388 v.Type = x.Type 34389 v.AddArg(x) 34390 return true 34391 } 34392 return false 34393 } 34394 func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool { 34395 // match: (ROLQ x (NEGQ y)) 34396 // cond: 34397 // result: (RORQ x y) 34398 for { 34399 _ = v.Args[1] 34400 x := v.Args[0] 34401 v_1 := v.Args[1] 34402 if v_1.Op != OpAMD64NEGQ { 34403 break 34404 } 34405 y := v_1.Args[0] 34406 v.reset(OpAMD64RORQ) 34407 v.AddArg(x) 34408 v.AddArg(y) 34409 return true 34410 } 34411 // match: (ROLQ x (NEGL y)) 34412 // cond: 34413 // result: (RORQ x y) 34414 for { 34415 _ = v.Args[1] 34416 x := v.Args[0] 34417 v_1 := v.Args[1] 34418 if v_1.Op != OpAMD64NEGL { 34419 break 34420 } 34421 y := v_1.Args[0] 34422 v.reset(OpAMD64RORQ) 34423 v.AddArg(x) 34424 v.AddArg(y) 34425 return true 34426 } 34427 // match: (ROLQ x (MOVQconst [c])) 34428 // cond: 34429 // result: (ROLQconst [c&63] x) 34430 for { 34431 _ = v.Args[1] 34432 x := v.Args[0] 34433 v_1 := v.Args[1] 34434 if v_1.Op != OpAMD64MOVQconst { 34435 break 34436 } 34437 c := v_1.AuxInt 34438 v.reset(OpAMD64ROLQconst) 34439 v.AuxInt = c & 63 34440 v.AddArg(x) 34441 return true 34442 } 34443 // match: (ROLQ x (MOVLconst [c])) 34444 // cond: 34445 // result: (ROLQconst [c&63] x) 34446 for { 34447 _ = v.Args[1] 34448 x := v.Args[0] 34449 v_1 := v.Args[1] 34450 if v_1.Op != OpAMD64MOVLconst { 34451 break 34452 } 34453 c := v_1.AuxInt 34454 v.reset(OpAMD64ROLQconst) 34455 v.AuxInt = c & 63 34456 v.AddArg(x) 34457 return true 34458 } 34459 return false 34460 } 34461 func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool { 34462 // match: (ROLQconst [c] (ROLQconst [d] x)) 34463 // cond: 34464 // result: (ROLQconst [(c+d)&63] x) 34465 for { 34466 c := v.AuxInt 34467 v_0 := v.Args[0] 34468 if v_0.Op != OpAMD64ROLQconst { 34469 break 34470 } 34471 d := v_0.AuxInt 34472 x := v_0.Args[0] 34473 v.reset(OpAMD64ROLQconst) 34474 v.AuxInt = (c + d) & 63 34475 v.AddArg(x) 34476 return true 34477 } 34478 // match: (ROLQconst x [0]) 34479 // cond: 34480 // result: x 34481 for { 34482 if v.AuxInt != 0 { 34483 break 34484 } 34485 x := v.Args[0] 34486 v.reset(OpCopy) 34487 v.Type = x.Type 34488 v.AddArg(x) 34489 return true 34490 } 34491 return false 34492 } 34493 func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool { 34494 // match: (ROLW x (NEGQ y)) 34495 // cond: 34496 // result: (RORW x y) 34497 for { 34498 _ = v.Args[1] 34499 x := v.Args[0] 34500 v_1 := v.Args[1] 34501 if v_1.Op != OpAMD64NEGQ { 34502 break 34503 } 34504 y := v_1.Args[0] 34505 v.reset(OpAMD64RORW) 34506 v.AddArg(x) 34507 v.AddArg(y) 34508 return true 34509 } 34510 // match: (ROLW x (NEGL y)) 34511 // cond: 34512 // result: (RORW x y) 34513 for { 34514 _ = v.Args[1] 34515 x := v.Args[0] 34516 v_1 := v.Args[1] 34517 if v_1.Op != OpAMD64NEGL { 34518 break 34519 } 34520 y := v_1.Args[0] 34521 v.reset(OpAMD64RORW) 34522 v.AddArg(x) 34523 v.AddArg(y) 34524 return true 34525 } 34526 // match: (ROLW x (MOVQconst [c])) 34527 // 
cond: 34528 // result: (ROLWconst [c&15] x) 34529 for { 34530 _ = v.Args[1] 34531 x := v.Args[0] 34532 v_1 := v.Args[1] 34533 if v_1.Op != OpAMD64MOVQconst { 34534 break 34535 } 34536 c := v_1.AuxInt 34537 v.reset(OpAMD64ROLWconst) 34538 v.AuxInt = c & 15 34539 v.AddArg(x) 34540 return true 34541 } 34542 // match: (ROLW x (MOVLconst [c])) 34543 // cond: 34544 // result: (ROLWconst [c&15] x) 34545 for { 34546 _ = v.Args[1] 34547 x := v.Args[0] 34548 v_1 := v.Args[1] 34549 if v_1.Op != OpAMD64MOVLconst { 34550 break 34551 } 34552 c := v_1.AuxInt 34553 v.reset(OpAMD64ROLWconst) 34554 v.AuxInt = c & 15 34555 v.AddArg(x) 34556 return true 34557 } 34558 return false 34559 } 34560 func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool { 34561 // match: (ROLWconst [c] (ROLWconst [d] x)) 34562 // cond: 34563 // result: (ROLWconst [(c+d)&15] x) 34564 for { 34565 c := v.AuxInt 34566 v_0 := v.Args[0] 34567 if v_0.Op != OpAMD64ROLWconst { 34568 break 34569 } 34570 d := v_0.AuxInt 34571 x := v_0.Args[0] 34572 v.reset(OpAMD64ROLWconst) 34573 v.AuxInt = (c + d) & 15 34574 v.AddArg(x) 34575 return true 34576 } 34577 // match: (ROLWconst x [0]) 34578 // cond: 34579 // result: x 34580 for { 34581 if v.AuxInt != 0 { 34582 break 34583 } 34584 x := v.Args[0] 34585 v.reset(OpCopy) 34586 v.Type = x.Type 34587 v.AddArg(x) 34588 return true 34589 } 34590 return false 34591 } 34592 func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool { 34593 // match: (RORB x (NEGQ y)) 34594 // cond: 34595 // result: (ROLB x y) 34596 for { 34597 _ = v.Args[1] 34598 x := v.Args[0] 34599 v_1 := v.Args[1] 34600 if v_1.Op != OpAMD64NEGQ { 34601 break 34602 } 34603 y := v_1.Args[0] 34604 v.reset(OpAMD64ROLB) 34605 v.AddArg(x) 34606 v.AddArg(y) 34607 return true 34608 } 34609 // match: (RORB x (NEGL y)) 34610 // cond: 34611 // result: (ROLB x y) 34612 for { 34613 _ = v.Args[1] 34614 x := v.Args[0] 34615 v_1 := v.Args[1] 34616 if v_1.Op != OpAMD64NEGL { 34617 break 34618 } 34619 y := v_1.Args[0] 34620 v.reset(OpAMD64ROLB) 34621 v.AddArg(x) 34622 v.AddArg(y) 34623 return true 34624 } 34625 // match: (RORB x (MOVQconst [c])) 34626 // cond: 34627 // result: (ROLBconst [(-c)&7 ] x) 34628 for { 34629 _ = v.Args[1] 34630 x := v.Args[0] 34631 v_1 := v.Args[1] 34632 if v_1.Op != OpAMD64MOVQconst { 34633 break 34634 } 34635 c := v_1.AuxInt 34636 v.reset(OpAMD64ROLBconst) 34637 v.AuxInt = (-c) & 7 34638 v.AddArg(x) 34639 return true 34640 } 34641 // match: (RORB x (MOVLconst [c])) 34642 // cond: 34643 // result: (ROLBconst [(-c)&7 ] x) 34644 for { 34645 _ = v.Args[1] 34646 x := v.Args[0] 34647 v_1 := v.Args[1] 34648 if v_1.Op != OpAMD64MOVLconst { 34649 break 34650 } 34651 c := v_1.AuxInt 34652 v.reset(OpAMD64ROLBconst) 34653 v.AuxInt = (-c) & 7 34654 v.AddArg(x) 34655 return true 34656 } 34657 return false 34658 } 34659 func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool { 34660 // match: (RORL x (NEGQ y)) 34661 // cond: 34662 // result: (ROLL x y) 34663 for { 34664 _ = v.Args[1] 34665 x := v.Args[0] 34666 v_1 := v.Args[1] 34667 if v_1.Op != OpAMD64NEGQ { 34668 break 34669 } 34670 y := v_1.Args[0] 34671 v.reset(OpAMD64ROLL) 34672 v.AddArg(x) 34673 v.AddArg(y) 34674 return true 34675 } 34676 // match: (RORL x (NEGL y)) 34677 // cond: 34678 // result: (ROLL x y) 34679 for { 34680 _ = v.Args[1] 34681 x := v.Args[0] 34682 v_1 := v.Args[1] 34683 if v_1.Op != OpAMD64NEGL { 34684 break 34685 } 34686 y := v_1.Args[0] 34687 v.reset(OpAMD64ROLL) 34688 v.AddArg(x) 34689 v.AddArg(y) 34690 return true 34691 } 34692 // match: (RORL x (MOVQconst [c])) 34693 // 
func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
	// match: (RORL x (NEGQ y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (NEGL y))
	// cond:
	// result: (ROLL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// cond:
	// result: (ROLLconst [(-c)&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = (-c) & 31
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
	// match: (RORQ x (NEGQ y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// cond:
	// result: (ROLQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// cond:
	// result: (ROLQconst [(-c)&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = (-c) & 63
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
	// match: (RORW x (NEGQ y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (NEGL y))
	// cond:
	// result: (ROLW x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// cond:
	// result: (ROLWconst [(-c)&15] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = (-c) & 15
		v.AddArg(x)
		return true
	}
	return false
}
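// Informal note: the ROR rules above never produce RORxconst operations.
// A right rotation by a constant c is instead canonicalized to the
// equivalent left rotation by the complement, (-c) masked to the operand
// width ((-c)&63 for RORQ down to (-c)&7 for RORB), so the rest of the
// compiler only has to handle the ROLxconst form.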
func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
	// match: (SARB x (MOVQconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// cond:
	// result: (SARBconst [min(c&31,7)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARBconst)
		v.AuxInt = min(c&31, 7)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
	// match: (SARBconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int8(d)) >> uint64(c)
		return true
	}
	return false
}
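// Informal note: the SARB rules above clamp the constant shift count with
// min(c&31, 7) rather than just masking it, because an arithmetic right
// shift by 7 already fills every bit of a byte with the sign bit, so any
// larger count produces the same value. The int64(int8(d)) conversion in
// the constant-folding rule makes that sign extension explicit; SARW below
// is handled the same way with min(c&31, 15) and int16.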
func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARL x (MOVQconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// cond:
	// result: (SARLconst [c&31] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARLconst)
		v.AuxInt = c & 31
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
	// match: (SARLconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int32(d)) >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SARQ x (MOVQconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// cond:
	// result: (SARQconst [c&63] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARQconst)
		v.AuxInt = c & 63
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1.AuxInt
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_1_0.AuxInt
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
	// match: (SARQconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = d >> uint64(c)
		return true
	}
	return false
}
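// Informal note: the SARL and SARQ rules above normalize the variable shift
// count instead of the shifted value. A 32-bit (64-bit) hardware shift uses
// the count modulo 32 (64), so an ADDconst whose constant is 0 mod the width
// is a no-op on the count, and an ANDconst whose constant keeps all of the
// low 5 (6) bits is redundant; both are stripped, including underneath a
// NEG, where only the NEG is rebuilt around the simplified count via
// b.NewValue0.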
func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
	// match: (SARW x (MOVQconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// cond:
	// result: (SARWconst [min(c&31,15)] x)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64SARWconst)
		v.AuxInt = min(c&31, 15)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
	// match: (SARWconst x [0])
	// cond:
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(int16(d)) >> uint64(c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
	// match: (SBBLcarrymask (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
	// match: (SBBQcarrymask (FlagEQ))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// cond:
	// result: (MOVQconst [-1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = -1
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// cond:
	// result: (MOVQconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = 0
		return true
	}
	return false
}
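// Informal note: SBBLcarrymask and SBBQcarrymask materialize an all-ones or
// all-zeros mask from the carry flag (subtract-with-borrow of a register
// from itself). With statically known flags the mask is a constant: -1 for
// the unsigned-less-than states (carry set), 0 for every other state, which
// is exactly what the rules above produce.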
func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
	// match: (SETA (InvertFlags x))
	// cond:
	// result: (SETB x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
	// match: (SETAE (InvertFlags x))
	// cond:
	// result: (SETBE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
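// Informal note: the SETcc rules here and below follow two patterns. An
// InvertFlags argument records that the comparison operands were swapped,
// so the condition is replaced by its mirror image (SETA<->SETB,
// SETAE<->SETBE, and for signed comparisons SETG<->SETL, SETGE<->SETLE),
// not by its negation. A known flag state (FlagEQ, FlagLT_*, FlagGT_*)
// folds the whole SETcc into (MOVLconst [0]) or (MOVLconst [1]).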
func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETBEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETBEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETAmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETBmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
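// Informal note: the SETccmem forms fuse the flag test with a one-byte
// store. Once the flags are known there is nothing left to test, so the
// rules above (and the analogous rules for the remaining conditions below)
// rewrite directly to a MOVBstore of a constant 0 or 1, preserving the
// original offset, symbol, pointer, and memory arguments.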
func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
	// match: (SETB (InvertFlags x))
	// cond:
	// result: (SETA x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
	// match: (SETBE (InvertFlags x))
	// cond:
	// result: (SETAE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETBEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETAEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETBmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETAmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETAmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		x := v_0_0.Args[1]
		y := v_0.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		x := v_0_1.Args[1]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETAE (BTLconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		x := v_0.Args[1]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ x (MOVQconst [c])))
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (InvertFlags x))
	// cond:
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
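// Informal note: the TEST-based SETEQ rules above recognize single-bit
// tests. (TESTQ (SHLQ (MOVQconst [1]) x) y), and the TESTQconst variants
// with a power-of-two constant, examine exactly one bit of the other
// operand, so the test can be replaced by a BT instruction, which copies
// the selected bit into the carry flag. Equality with zero means the bit
// was clear (CF=0), which is read back with SETAE. The !config.nacl
// conditions merely disable the BT forms when targeting Native Client.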
func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
	// match: (SETEQ (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETEQmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETEQmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagEQ {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool {
	b := v.Block
	_ = b
	// match: (SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		x := v.Args[1]
		if x.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v.Args[2]
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
{ 37288 v_0 := v.Args[0] 37289 if v_0.Op != OpAMD64FlagLT_UGT { 37290 break 37291 } 37292 v.reset(OpAMD64MOVLconst) 37293 v.AuxInt = 0 37294 return true 37295 } 37296 // match: (SETG (FlagGT_ULT)) 37297 // cond: 37298 // result: (MOVLconst [1]) 37299 for { 37300 v_0 := v.Args[0] 37301 if v_0.Op != OpAMD64FlagGT_ULT { 37302 break 37303 } 37304 v.reset(OpAMD64MOVLconst) 37305 v.AuxInt = 1 37306 return true 37307 } 37308 // match: (SETG (FlagGT_UGT)) 37309 // cond: 37310 // result: (MOVLconst [1]) 37311 for { 37312 v_0 := v.Args[0] 37313 if v_0.Op != OpAMD64FlagGT_UGT { 37314 break 37315 } 37316 v.reset(OpAMD64MOVLconst) 37317 v.AuxInt = 1 37318 return true 37319 } 37320 return false 37321 } 37322 func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { 37323 // match: (SETGE (InvertFlags x)) 37324 // cond: 37325 // result: (SETLE x) 37326 for { 37327 v_0 := v.Args[0] 37328 if v_0.Op != OpAMD64InvertFlags { 37329 break 37330 } 37331 x := v_0.Args[0] 37332 v.reset(OpAMD64SETLE) 37333 v.AddArg(x) 37334 return true 37335 } 37336 // match: (SETGE (FlagEQ)) 37337 // cond: 37338 // result: (MOVLconst [1]) 37339 for { 37340 v_0 := v.Args[0] 37341 if v_0.Op != OpAMD64FlagEQ { 37342 break 37343 } 37344 v.reset(OpAMD64MOVLconst) 37345 v.AuxInt = 1 37346 return true 37347 } 37348 // match: (SETGE (FlagLT_ULT)) 37349 // cond: 37350 // result: (MOVLconst [0]) 37351 for { 37352 v_0 := v.Args[0] 37353 if v_0.Op != OpAMD64FlagLT_ULT { 37354 break 37355 } 37356 v.reset(OpAMD64MOVLconst) 37357 v.AuxInt = 0 37358 return true 37359 } 37360 // match: (SETGE (FlagLT_UGT)) 37361 // cond: 37362 // result: (MOVLconst [0]) 37363 for { 37364 v_0 := v.Args[0] 37365 if v_0.Op != OpAMD64FlagLT_UGT { 37366 break 37367 } 37368 v.reset(OpAMD64MOVLconst) 37369 v.AuxInt = 0 37370 return true 37371 } 37372 // match: (SETGE (FlagGT_ULT)) 37373 // cond: 37374 // result: (MOVLconst [1]) 37375 for { 37376 v_0 := v.Args[0] 37377 if v_0.Op != OpAMD64FlagGT_ULT { 37378 break 37379 } 37380 v.reset(OpAMD64MOVLconst) 37381 v.AuxInt = 1 37382 return true 37383 } 37384 // match: (SETGE (FlagGT_UGT)) 37385 // cond: 37386 // result: (MOVLconst [1]) 37387 for { 37388 v_0 := v.Args[0] 37389 if v_0.Op != OpAMD64FlagGT_UGT { 37390 break 37391 } 37392 v.reset(OpAMD64MOVLconst) 37393 v.AuxInt = 1 37394 return true 37395 } 37396 return false 37397 } 37398 func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool { 37399 b := v.Block 37400 _ = b 37401 // match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem) 37402 // cond: 37403 // result: (SETLEmem [off] {sym} ptr x mem) 37404 for { 37405 off := v.AuxInt 37406 sym := v.Aux 37407 _ = v.Args[2] 37408 ptr := v.Args[0] 37409 v_1 := v.Args[1] 37410 if v_1.Op != OpAMD64InvertFlags { 37411 break 37412 } 37413 x := v_1.Args[0] 37414 mem := v.Args[2] 37415 v.reset(OpAMD64SETLEmem) 37416 v.AuxInt = off 37417 v.Aux = sym 37418 v.AddArg(ptr) 37419 v.AddArg(x) 37420 v.AddArg(mem) 37421 return true 37422 } 37423 // match: (SETGEmem [off] {sym} ptr x:(FlagEQ) mem) 37424 // cond: 37425 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37426 for { 37427 off := v.AuxInt 37428 sym := v.Aux 37429 _ = v.Args[2] 37430 ptr := v.Args[0] 37431 x := v.Args[1] 37432 if x.Op != OpAMD64FlagEQ { 37433 break 37434 } 37435 mem := v.Args[2] 37436 v.reset(OpAMD64MOVBstore) 37437 v.AuxInt = off 37438 v.Aux = sym 37439 v.AddArg(ptr) 37440 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37441 v0.AuxInt = 1 37442 v.AddArg(v0) 37443 v.AddArg(mem) 37444 return true 37445 } 37446 // match: (SETGEmem [off] {sym} ptr 
x:(FlagLT_ULT) mem) 37447 // cond: 37448 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37449 for { 37450 off := v.AuxInt 37451 sym := v.Aux 37452 _ = v.Args[2] 37453 ptr := v.Args[0] 37454 x := v.Args[1] 37455 if x.Op != OpAMD64FlagLT_ULT { 37456 break 37457 } 37458 mem := v.Args[2] 37459 v.reset(OpAMD64MOVBstore) 37460 v.AuxInt = off 37461 v.Aux = sym 37462 v.AddArg(ptr) 37463 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37464 v0.AuxInt = 0 37465 v.AddArg(v0) 37466 v.AddArg(mem) 37467 return true 37468 } 37469 // match: (SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem) 37470 // cond: 37471 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37472 for { 37473 off := v.AuxInt 37474 sym := v.Aux 37475 _ = v.Args[2] 37476 ptr := v.Args[0] 37477 x := v.Args[1] 37478 if x.Op != OpAMD64FlagLT_UGT { 37479 break 37480 } 37481 mem := v.Args[2] 37482 v.reset(OpAMD64MOVBstore) 37483 v.AuxInt = off 37484 v.Aux = sym 37485 v.AddArg(ptr) 37486 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37487 v0.AuxInt = 0 37488 v.AddArg(v0) 37489 v.AddArg(mem) 37490 return true 37491 } 37492 // match: (SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem) 37493 // cond: 37494 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37495 for { 37496 off := v.AuxInt 37497 sym := v.Aux 37498 _ = v.Args[2] 37499 ptr := v.Args[0] 37500 x := v.Args[1] 37501 if x.Op != OpAMD64FlagGT_ULT { 37502 break 37503 } 37504 mem := v.Args[2] 37505 v.reset(OpAMD64MOVBstore) 37506 v.AuxInt = off 37507 v.Aux = sym 37508 v.AddArg(ptr) 37509 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37510 v0.AuxInt = 1 37511 v.AddArg(v0) 37512 v.AddArg(mem) 37513 return true 37514 } 37515 // match: (SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem) 37516 // cond: 37517 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37518 for { 37519 off := v.AuxInt 37520 sym := v.Aux 37521 _ = v.Args[2] 37522 ptr := v.Args[0] 37523 x := v.Args[1] 37524 if x.Op != OpAMD64FlagGT_UGT { 37525 break 37526 } 37527 mem := v.Args[2] 37528 v.reset(OpAMD64MOVBstore) 37529 v.AuxInt = off 37530 v.Aux = sym 37531 v.AddArg(ptr) 37532 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37533 v0.AuxInt = 1 37534 v.AddArg(v0) 37535 v.AddArg(mem) 37536 return true 37537 } 37538 return false 37539 } 37540 func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool { 37541 b := v.Block 37542 _ = b 37543 // match: (SETGmem [off] {sym} ptr (InvertFlags x) mem) 37544 // cond: 37545 // result: (SETLmem [off] {sym} ptr x mem) 37546 for { 37547 off := v.AuxInt 37548 sym := v.Aux 37549 _ = v.Args[2] 37550 ptr := v.Args[0] 37551 v_1 := v.Args[1] 37552 if v_1.Op != OpAMD64InvertFlags { 37553 break 37554 } 37555 x := v_1.Args[0] 37556 mem := v.Args[2] 37557 v.reset(OpAMD64SETLmem) 37558 v.AuxInt = off 37559 v.Aux = sym 37560 v.AddArg(ptr) 37561 v.AddArg(x) 37562 v.AddArg(mem) 37563 return true 37564 } 37565 // match: (SETGmem [off] {sym} ptr x:(FlagEQ) mem) 37566 // cond: 37567 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37568 for { 37569 off := v.AuxInt 37570 sym := v.Aux 37571 _ = v.Args[2] 37572 ptr := v.Args[0] 37573 x := v.Args[1] 37574 if x.Op != OpAMD64FlagEQ { 37575 break 37576 } 37577 mem := v.Args[2] 37578 v.reset(OpAMD64MOVBstore) 37579 v.AuxInt = off 37580 v.Aux = sym 37581 v.AddArg(ptr) 37582 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37583 v0.AuxInt = 0 37584 v.AddArg(v0) 37585 v.AddArg(mem) 37586 return true 37587 } 37588 // match: (SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem) 37589 // cond: 
37590 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37591 for { 37592 off := v.AuxInt 37593 sym := v.Aux 37594 _ = v.Args[2] 37595 ptr := v.Args[0] 37596 x := v.Args[1] 37597 if x.Op != OpAMD64FlagLT_ULT { 37598 break 37599 } 37600 mem := v.Args[2] 37601 v.reset(OpAMD64MOVBstore) 37602 v.AuxInt = off 37603 v.Aux = sym 37604 v.AddArg(ptr) 37605 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37606 v0.AuxInt = 0 37607 v.AddArg(v0) 37608 v.AddArg(mem) 37609 return true 37610 } 37611 // match: (SETGmem [off] {sym} ptr x:(FlagLT_UGT) mem) 37612 // cond: 37613 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37614 for { 37615 off := v.AuxInt 37616 sym := v.Aux 37617 _ = v.Args[2] 37618 ptr := v.Args[0] 37619 x := v.Args[1] 37620 if x.Op != OpAMD64FlagLT_UGT { 37621 break 37622 } 37623 mem := v.Args[2] 37624 v.reset(OpAMD64MOVBstore) 37625 v.AuxInt = off 37626 v.Aux = sym 37627 v.AddArg(ptr) 37628 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37629 v0.AuxInt = 0 37630 v.AddArg(v0) 37631 v.AddArg(mem) 37632 return true 37633 } 37634 // match: (SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem) 37635 // cond: 37636 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37637 for { 37638 off := v.AuxInt 37639 sym := v.Aux 37640 _ = v.Args[2] 37641 ptr := v.Args[0] 37642 x := v.Args[1] 37643 if x.Op != OpAMD64FlagGT_ULT { 37644 break 37645 } 37646 mem := v.Args[2] 37647 v.reset(OpAMD64MOVBstore) 37648 v.AuxInt = off 37649 v.Aux = sym 37650 v.AddArg(ptr) 37651 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37652 v0.AuxInt = 1 37653 v.AddArg(v0) 37654 v.AddArg(mem) 37655 return true 37656 } 37657 // match: (SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem) 37658 // cond: 37659 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37660 for { 37661 off := v.AuxInt 37662 sym := v.Aux 37663 _ = v.Args[2] 37664 ptr := v.Args[0] 37665 x := v.Args[1] 37666 if x.Op != OpAMD64FlagGT_UGT { 37667 break 37668 } 37669 mem := v.Args[2] 37670 v.reset(OpAMD64MOVBstore) 37671 v.AuxInt = off 37672 v.Aux = sym 37673 v.AddArg(ptr) 37674 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37675 v0.AuxInt = 1 37676 v.AddArg(v0) 37677 v.AddArg(mem) 37678 return true 37679 } 37680 return false 37681 } 37682 func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { 37683 // match: (SETL (InvertFlags x)) 37684 // cond: 37685 // result: (SETG x) 37686 for { 37687 v_0 := v.Args[0] 37688 if v_0.Op != OpAMD64InvertFlags { 37689 break 37690 } 37691 x := v_0.Args[0] 37692 v.reset(OpAMD64SETG) 37693 v.AddArg(x) 37694 return true 37695 } 37696 // match: (SETL (FlagEQ)) 37697 // cond: 37698 // result: (MOVLconst [0]) 37699 for { 37700 v_0 := v.Args[0] 37701 if v_0.Op != OpAMD64FlagEQ { 37702 break 37703 } 37704 v.reset(OpAMD64MOVLconst) 37705 v.AuxInt = 0 37706 return true 37707 } 37708 // match: (SETL (FlagLT_ULT)) 37709 // cond: 37710 // result: (MOVLconst [1]) 37711 for { 37712 v_0 := v.Args[0] 37713 if v_0.Op != OpAMD64FlagLT_ULT { 37714 break 37715 } 37716 v.reset(OpAMD64MOVLconst) 37717 v.AuxInt = 1 37718 return true 37719 } 37720 // match: (SETL (FlagLT_UGT)) 37721 // cond: 37722 // result: (MOVLconst [1]) 37723 for { 37724 v_0 := v.Args[0] 37725 if v_0.Op != OpAMD64FlagLT_UGT { 37726 break 37727 } 37728 v.reset(OpAMD64MOVLconst) 37729 v.AuxInt = 1 37730 return true 37731 } 37732 // match: (SETL (FlagGT_ULT)) 37733 // cond: 37734 // result: (MOVLconst [0]) 37735 for { 37736 v_0 := v.Args[0] 37737 if v_0.Op != OpAMD64FlagGT_ULT { 37738 break 37739 } 37740 
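// The Flag* values matched in these rules are pseudo-ops recording a comparison
// whose outcome is already known; judging by the rule set, the first half of the
// name is the signed result and the suffix the unsigned one (FlagGT_ULT: signed
// "greater", unsigned "below"). SETL tests the signed result, so any FlagGT_*
// input folds to a constant 0 and any FlagLT_* input folds to a constant 1:
//   (SETL (FlagGT_ULT)) => (MOVLconst [0])
//   (SETL (FlagLT_UGT)) => (MOVLconst [1])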
v.reset(OpAMD64MOVLconst) 37741 v.AuxInt = 0 37742 return true 37743 } 37744 // match: (SETL (FlagGT_UGT)) 37745 // cond: 37746 // result: (MOVLconst [0]) 37747 for { 37748 v_0 := v.Args[0] 37749 if v_0.Op != OpAMD64FlagGT_UGT { 37750 break 37751 } 37752 v.reset(OpAMD64MOVLconst) 37753 v.AuxInt = 0 37754 return true 37755 } 37756 return false 37757 } 37758 func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { 37759 // match: (SETLE (InvertFlags x)) 37760 // cond: 37761 // result: (SETGE x) 37762 for { 37763 v_0 := v.Args[0] 37764 if v_0.Op != OpAMD64InvertFlags { 37765 break 37766 } 37767 x := v_0.Args[0] 37768 v.reset(OpAMD64SETGE) 37769 v.AddArg(x) 37770 return true 37771 } 37772 // match: (SETLE (FlagEQ)) 37773 // cond: 37774 // result: (MOVLconst [1]) 37775 for { 37776 v_0 := v.Args[0] 37777 if v_0.Op != OpAMD64FlagEQ { 37778 break 37779 } 37780 v.reset(OpAMD64MOVLconst) 37781 v.AuxInt = 1 37782 return true 37783 } 37784 // match: (SETLE (FlagLT_ULT)) 37785 // cond: 37786 // result: (MOVLconst [1]) 37787 for { 37788 v_0 := v.Args[0] 37789 if v_0.Op != OpAMD64FlagLT_ULT { 37790 break 37791 } 37792 v.reset(OpAMD64MOVLconst) 37793 v.AuxInt = 1 37794 return true 37795 } 37796 // match: (SETLE (FlagLT_UGT)) 37797 // cond: 37798 // result: (MOVLconst [1]) 37799 for { 37800 v_0 := v.Args[0] 37801 if v_0.Op != OpAMD64FlagLT_UGT { 37802 break 37803 } 37804 v.reset(OpAMD64MOVLconst) 37805 v.AuxInt = 1 37806 return true 37807 } 37808 // match: (SETLE (FlagGT_ULT)) 37809 // cond: 37810 // result: (MOVLconst [0]) 37811 for { 37812 v_0 := v.Args[0] 37813 if v_0.Op != OpAMD64FlagGT_ULT { 37814 break 37815 } 37816 v.reset(OpAMD64MOVLconst) 37817 v.AuxInt = 0 37818 return true 37819 } 37820 // match: (SETLE (FlagGT_UGT)) 37821 // cond: 37822 // result: (MOVLconst [0]) 37823 for { 37824 v_0 := v.Args[0] 37825 if v_0.Op != OpAMD64FlagGT_UGT { 37826 break 37827 } 37828 v.reset(OpAMD64MOVLconst) 37829 v.AuxInt = 0 37830 return true 37831 } 37832 return false 37833 } 37834 func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool { 37835 b := v.Block 37836 _ = b 37837 // match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem) 37838 // cond: 37839 // result: (SETGEmem [off] {sym} ptr x mem) 37840 for { 37841 off := v.AuxInt 37842 sym := v.Aux 37843 _ = v.Args[2] 37844 ptr := v.Args[0] 37845 v_1 := v.Args[1] 37846 if v_1.Op != OpAMD64InvertFlags { 37847 break 37848 } 37849 x := v_1.Args[0] 37850 mem := v.Args[2] 37851 v.reset(OpAMD64SETGEmem) 37852 v.AuxInt = off 37853 v.Aux = sym 37854 v.AddArg(ptr) 37855 v.AddArg(x) 37856 v.AddArg(mem) 37857 return true 37858 } 37859 // match: (SETLEmem [off] {sym} ptr x:(FlagEQ) mem) 37860 // cond: 37861 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37862 for { 37863 off := v.AuxInt 37864 sym := v.Aux 37865 _ = v.Args[2] 37866 ptr := v.Args[0] 37867 x := v.Args[1] 37868 if x.Op != OpAMD64FlagEQ { 37869 break 37870 } 37871 mem := v.Args[2] 37872 v.reset(OpAMD64MOVBstore) 37873 v.AuxInt = off 37874 v.Aux = sym 37875 v.AddArg(ptr) 37876 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37877 v0.AuxInt = 1 37878 v.AddArg(v0) 37879 v.AddArg(mem) 37880 return true 37881 } 37882 // match: (SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem) 37883 // cond: 37884 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37885 for { 37886 off := v.AuxInt 37887 sym := v.Aux 37888 _ = v.Args[2] 37889 ptr := v.Args[0] 37890 x := v.Args[1] 37891 if x.Op != OpAMD64FlagLT_ULT { 37892 break 37893 } 37894 mem := v.Args[2] 37895 v.reset(OpAMD64MOVBstore) 37896 v.AuxInt 
= off 37897 v.Aux = sym 37898 v.AddArg(ptr) 37899 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37900 v0.AuxInt = 1 37901 v.AddArg(v0) 37902 v.AddArg(mem) 37903 return true 37904 } 37905 // match: (SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem) 37906 // cond: 37907 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 37908 for { 37909 off := v.AuxInt 37910 sym := v.Aux 37911 _ = v.Args[2] 37912 ptr := v.Args[0] 37913 x := v.Args[1] 37914 if x.Op != OpAMD64FlagLT_UGT { 37915 break 37916 } 37917 mem := v.Args[2] 37918 v.reset(OpAMD64MOVBstore) 37919 v.AuxInt = off 37920 v.Aux = sym 37921 v.AddArg(ptr) 37922 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37923 v0.AuxInt = 1 37924 v.AddArg(v0) 37925 v.AddArg(mem) 37926 return true 37927 } 37928 // match: (SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem) 37929 // cond: 37930 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37931 for { 37932 off := v.AuxInt 37933 sym := v.Aux 37934 _ = v.Args[2] 37935 ptr := v.Args[0] 37936 x := v.Args[1] 37937 if x.Op != OpAMD64FlagGT_ULT { 37938 break 37939 } 37940 mem := v.Args[2] 37941 v.reset(OpAMD64MOVBstore) 37942 v.AuxInt = off 37943 v.Aux = sym 37944 v.AddArg(ptr) 37945 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37946 v0.AuxInt = 0 37947 v.AddArg(v0) 37948 v.AddArg(mem) 37949 return true 37950 } 37951 // match: (SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem) 37952 // cond: 37953 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 37954 for { 37955 off := v.AuxInt 37956 sym := v.Aux 37957 _ = v.Args[2] 37958 ptr := v.Args[0] 37959 x := v.Args[1] 37960 if x.Op != OpAMD64FlagGT_UGT { 37961 break 37962 } 37963 mem := v.Args[2] 37964 v.reset(OpAMD64MOVBstore) 37965 v.AuxInt = off 37966 v.Aux = sym 37967 v.AddArg(ptr) 37968 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 37969 v0.AuxInt = 0 37970 v.AddArg(v0) 37971 v.AddArg(mem) 37972 return true 37973 } 37974 return false 37975 } 37976 func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool { 37977 b := v.Block 37978 _ = b 37979 // match: (SETLmem [off] {sym} ptr (InvertFlags x) mem) 37980 // cond: 37981 // result: (SETGmem [off] {sym} ptr x mem) 37982 for { 37983 off := v.AuxInt 37984 sym := v.Aux 37985 _ = v.Args[2] 37986 ptr := v.Args[0] 37987 v_1 := v.Args[1] 37988 if v_1.Op != OpAMD64InvertFlags { 37989 break 37990 } 37991 x := v_1.Args[0] 37992 mem := v.Args[2] 37993 v.reset(OpAMD64SETGmem) 37994 v.AuxInt = off 37995 v.Aux = sym 37996 v.AddArg(ptr) 37997 v.AddArg(x) 37998 v.AddArg(mem) 37999 return true 38000 } 38001 // match: (SETLmem [off] {sym} ptr x:(FlagEQ) mem) 38002 // cond: 38003 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 38004 for { 38005 off := v.AuxInt 38006 sym := v.Aux 38007 _ = v.Args[2] 38008 ptr := v.Args[0] 38009 x := v.Args[1] 38010 if x.Op != OpAMD64FlagEQ { 38011 break 38012 } 38013 mem := v.Args[2] 38014 v.reset(OpAMD64MOVBstore) 38015 v.AuxInt = off 38016 v.Aux = sym 38017 v.AddArg(ptr) 38018 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38019 v0.AuxInt = 0 38020 v.AddArg(v0) 38021 v.AddArg(mem) 38022 return true 38023 } 38024 // match: (SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem) 38025 // cond: 38026 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38027 for { 38028 off := v.AuxInt 38029 sym := v.Aux 38030 _ = v.Args[2] 38031 ptr := v.Args[0] 38032 x := v.Args[1] 38033 if x.Op != OpAMD64FlagLT_ULT { 38034 break 38035 } 38036 mem := v.Args[2] 38037 v.reset(OpAMD64MOVBstore) 38038 v.AuxInt = off 38039 v.Aux = sym 38040 
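// SETLmem is the fused form of SETL whose result goes straight to memory, so a
// constant-flag input folds the whole operation to a plain one-byte store of 0
// or 1 rather than a register SETcc followed by a store, as in this rule:
//   (SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem)
//     => (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)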
v.AddArg(ptr) 38041 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38042 v0.AuxInt = 1 38043 v.AddArg(v0) 38044 v.AddArg(mem) 38045 return true 38046 } 38047 // match: (SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem) 38048 // cond: 38049 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38050 for { 38051 off := v.AuxInt 38052 sym := v.Aux 38053 _ = v.Args[2] 38054 ptr := v.Args[0] 38055 x := v.Args[1] 38056 if x.Op != OpAMD64FlagLT_UGT { 38057 break 38058 } 38059 mem := v.Args[2] 38060 v.reset(OpAMD64MOVBstore) 38061 v.AuxInt = off 38062 v.Aux = sym 38063 v.AddArg(ptr) 38064 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38065 v0.AuxInt = 1 38066 v.AddArg(v0) 38067 v.AddArg(mem) 38068 return true 38069 } 38070 // match: (SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem) 38071 // cond: 38072 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 38073 for { 38074 off := v.AuxInt 38075 sym := v.Aux 38076 _ = v.Args[2] 38077 ptr := v.Args[0] 38078 x := v.Args[1] 38079 if x.Op != OpAMD64FlagGT_ULT { 38080 break 38081 } 38082 mem := v.Args[2] 38083 v.reset(OpAMD64MOVBstore) 38084 v.AuxInt = off 38085 v.Aux = sym 38086 v.AddArg(ptr) 38087 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38088 v0.AuxInt = 0 38089 v.AddArg(v0) 38090 v.AddArg(mem) 38091 return true 38092 } 38093 // match: (SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem) 38094 // cond: 38095 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 38096 for { 38097 off := v.AuxInt 38098 sym := v.Aux 38099 _ = v.Args[2] 38100 ptr := v.Args[0] 38101 x := v.Args[1] 38102 if x.Op != OpAMD64FlagGT_UGT { 38103 break 38104 } 38105 mem := v.Args[2] 38106 v.reset(OpAMD64MOVBstore) 38107 v.AuxInt = off 38108 v.Aux = sym 38109 v.AddArg(ptr) 38110 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38111 v0.AuxInt = 0 38112 v.AddArg(v0) 38113 v.AddArg(mem) 38114 return true 38115 } 38116 return false 38117 } 38118 func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { 38119 b := v.Block 38120 _ = b 38121 config := b.Func.Config 38122 _ = config 38123 // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) 38124 // cond: !config.nacl 38125 // result: (SETB (BTL x y)) 38126 for { 38127 v_0 := v.Args[0] 38128 if v_0.Op != OpAMD64TESTL { 38129 break 38130 } 38131 _ = v_0.Args[1] 38132 v_0_0 := v_0.Args[0] 38133 if v_0_0.Op != OpAMD64SHLL { 38134 break 38135 } 38136 _ = v_0_0.Args[1] 38137 v_0_0_0 := v_0_0.Args[0] 38138 if v_0_0_0.Op != OpAMD64MOVLconst { 38139 break 38140 } 38141 if v_0_0_0.AuxInt != 1 { 38142 break 38143 } 38144 x := v_0_0.Args[1] 38145 y := v_0.Args[1] 38146 if !(!config.nacl) { 38147 break 38148 } 38149 v.reset(OpAMD64SETB) 38150 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 38151 v0.AddArg(x) 38152 v0.AddArg(y) 38153 v.AddArg(v0) 38154 return true 38155 } 38156 // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) 38157 // cond: !config.nacl 38158 // result: (SETB (BTL x y)) 38159 for { 38160 v_0 := v.Args[0] 38161 if v_0.Op != OpAMD64TESTL { 38162 break 38163 } 38164 _ = v_0.Args[1] 38165 y := v_0.Args[0] 38166 v_0_1 := v_0.Args[1] 38167 if v_0_1.Op != OpAMD64SHLL { 38168 break 38169 } 38170 _ = v_0_1.Args[1] 38171 v_0_1_0 := v_0_1.Args[0] 38172 if v_0_1_0.Op != OpAMD64MOVLconst { 38173 break 38174 } 38175 if v_0_1_0.AuxInt != 1 { 38176 break 38177 } 38178 x := v_0_1.Args[1] 38179 if !(!config.nacl) { 38180 break 38181 } 38182 v.reset(OpAMD64SETB) 38183 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 38184 v0.AddArg(x) 38185 v0.AddArg(y) 38186 v.AddArg(v0) 38187 return true 38188 
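// Both TESTL orderings above lower a dynamic single-bit test to BTL: TESTL sets
// ZF from y & (1<<x), while BTL x y copies bit x of y into CF, so SETNE-of-TEST
// and SETB-of-BT compute the same byte:
//   (SETNE (TESTL (SHLL (MOVLconst [1]) x) y)) => (SETB (BTL x y))
// The !config.nacl guard skips the BT form on NaCl, where it is presumably
// disallowed or unprofitable.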
} 38189 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 38190 // cond: !config.nacl 38191 // result: (SETB (BTQ x y)) 38192 for { 38193 v_0 := v.Args[0] 38194 if v_0.Op != OpAMD64TESTQ { 38195 break 38196 } 38197 _ = v_0.Args[1] 38198 v_0_0 := v_0.Args[0] 38199 if v_0_0.Op != OpAMD64SHLQ { 38200 break 38201 } 38202 _ = v_0_0.Args[1] 38203 v_0_0_0 := v_0_0.Args[0] 38204 if v_0_0_0.Op != OpAMD64MOVQconst { 38205 break 38206 } 38207 if v_0_0_0.AuxInt != 1 { 38208 break 38209 } 38210 x := v_0_0.Args[1] 38211 y := v_0.Args[1] 38212 if !(!config.nacl) { 38213 break 38214 } 38215 v.reset(OpAMD64SETB) 38216 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 38217 v0.AddArg(x) 38218 v0.AddArg(y) 38219 v.AddArg(v0) 38220 return true 38221 } 38222 // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) 38223 // cond: !config.nacl 38224 // result: (SETB (BTQ x y)) 38225 for { 38226 v_0 := v.Args[0] 38227 if v_0.Op != OpAMD64TESTQ { 38228 break 38229 } 38230 _ = v_0.Args[1] 38231 y := v_0.Args[0] 38232 v_0_1 := v_0.Args[1] 38233 if v_0_1.Op != OpAMD64SHLQ { 38234 break 38235 } 38236 _ = v_0_1.Args[1] 38237 v_0_1_0 := v_0_1.Args[0] 38238 if v_0_1_0.Op != OpAMD64MOVQconst { 38239 break 38240 } 38241 if v_0_1_0.AuxInt != 1 { 38242 break 38243 } 38244 x := v_0_1.Args[1] 38245 if !(!config.nacl) { 38246 break 38247 } 38248 v.reset(OpAMD64SETB) 38249 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 38250 v0.AddArg(x) 38251 v0.AddArg(y) 38252 v.AddArg(v0) 38253 return true 38254 } 38255 // match: (SETNE (TESTLconst [c] x)) 38256 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 38257 // result: (SETB (BTLconst [log2(c)] x)) 38258 for { 38259 v_0 := v.Args[0] 38260 if v_0.Op != OpAMD64TESTLconst { 38261 break 38262 } 38263 c := v_0.AuxInt 38264 x := v_0.Args[0] 38265 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 38266 break 38267 } 38268 v.reset(OpAMD64SETB) 38269 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 38270 v0.AuxInt = log2(c) 38271 v0.AddArg(x) 38272 v.AddArg(v0) 38273 return true 38274 } 38275 // match: (SETNE (TESTQconst [c] x)) 38276 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38277 // result: (SETB (BTQconst [log2(c)] x)) 38278 for { 38279 v_0 := v.Args[0] 38280 if v_0.Op != OpAMD64TESTQconst { 38281 break 38282 } 38283 c := v_0.AuxInt 38284 x := v_0.Args[0] 38285 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38286 break 38287 } 38288 v.reset(OpAMD64SETB) 38289 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38290 v0.AuxInt = log2(c) 38291 v0.AddArg(x) 38292 v.AddArg(v0) 38293 return true 38294 } 38295 // match: (SETNE (TESTQ (MOVQconst [c]) x)) 38296 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38297 // result: (SETB (BTQconst [log2(c)] x)) 38298 for { 38299 v_0 := v.Args[0] 38300 if v_0.Op != OpAMD64TESTQ { 38301 break 38302 } 38303 _ = v_0.Args[1] 38304 v_0_0 := v_0.Args[0] 38305 if v_0_0.Op != OpAMD64MOVQconst { 38306 break 38307 } 38308 c := v_0_0.AuxInt 38309 x := v_0.Args[1] 38310 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38311 break 38312 } 38313 v.reset(OpAMD64SETB) 38314 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38315 v0.AuxInt = log2(c) 38316 v0.AddArg(x) 38317 v.AddArg(v0) 38318 return true 38319 } 38320 // match: (SETNE (TESTQ x (MOVQconst [c]))) 38321 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38322 // result: (SETB (BTQconst [log2(c)] x)) 38323 for { 38324 v_0 := v.Args[0] 38325 if v_0.Op != OpAMD64TESTQ { 38326 break 38327 } 38328 _ = v_0.Args[1] 38329 
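// When the tested mask is a known power of two, the same bit-test trick applies
// with a constant bit index: testing c == 1<<k is testing bit k, so the TEST
// folds to BTQconst and SETNE becomes SETB (BT leaves the selected bit in CF).
// For instance, with c = 8 (so log2(c) = 3):
//   (SETNE (TESTQ x (MOVQconst [8]))) => (SETB (BTQconst [3] x))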
x := v_0.Args[0] 38330 v_0_1 := v_0.Args[1] 38331 if v_0_1.Op != OpAMD64MOVQconst { 38332 break 38333 } 38334 c := v_0_1.AuxInt 38335 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38336 break 38337 } 38338 v.reset(OpAMD64SETB) 38339 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38340 v0.AuxInt = log2(c) 38341 v0.AddArg(x) 38342 v.AddArg(v0) 38343 return true 38344 } 38345 // match: (SETNE (InvertFlags x)) 38346 // cond: 38347 // result: (SETNE x) 38348 for { 38349 v_0 := v.Args[0] 38350 if v_0.Op != OpAMD64InvertFlags { 38351 break 38352 } 38353 x := v_0.Args[0] 38354 v.reset(OpAMD64SETNE) 38355 v.AddArg(x) 38356 return true 38357 } 38358 // match: (SETNE (FlagEQ)) 38359 // cond: 38360 // result: (MOVLconst [0]) 38361 for { 38362 v_0 := v.Args[0] 38363 if v_0.Op != OpAMD64FlagEQ { 38364 break 38365 } 38366 v.reset(OpAMD64MOVLconst) 38367 v.AuxInt = 0 38368 return true 38369 } 38370 return false 38371 } 38372 func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { 38373 // match: (SETNE (FlagLT_ULT)) 38374 // cond: 38375 // result: (MOVLconst [1]) 38376 for { 38377 v_0 := v.Args[0] 38378 if v_0.Op != OpAMD64FlagLT_ULT { 38379 break 38380 } 38381 v.reset(OpAMD64MOVLconst) 38382 v.AuxInt = 1 38383 return true 38384 } 38385 // match: (SETNE (FlagLT_UGT)) 38386 // cond: 38387 // result: (MOVLconst [1]) 38388 for { 38389 v_0 := v.Args[0] 38390 if v_0.Op != OpAMD64FlagLT_UGT { 38391 break 38392 } 38393 v.reset(OpAMD64MOVLconst) 38394 v.AuxInt = 1 38395 return true 38396 } 38397 // match: (SETNE (FlagGT_ULT)) 38398 // cond: 38399 // result: (MOVLconst [1]) 38400 for { 38401 v_0 := v.Args[0] 38402 if v_0.Op != OpAMD64FlagGT_ULT { 38403 break 38404 } 38405 v.reset(OpAMD64MOVLconst) 38406 v.AuxInt = 1 38407 return true 38408 } 38409 // match: (SETNE (FlagGT_UGT)) 38410 // cond: 38411 // result: (MOVLconst [1]) 38412 for { 38413 v_0 := v.Args[0] 38414 if v_0.Op != OpAMD64FlagGT_UGT { 38415 break 38416 } 38417 v.reset(OpAMD64MOVLconst) 38418 v.AuxInt = 1 38419 return true 38420 } 38421 return false 38422 } 38423 func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool { 38424 b := v.Block 38425 _ = b 38426 config := b.Func.Config 38427 _ = config 38428 // match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) 38429 // cond: !config.nacl 38430 // result: (SETBmem [off] {sym} ptr (BTL x y) mem) 38431 for { 38432 off := v.AuxInt 38433 sym := v.Aux 38434 _ = v.Args[2] 38435 ptr := v.Args[0] 38436 v_1 := v.Args[1] 38437 if v_1.Op != OpAMD64TESTL { 38438 break 38439 } 38440 _ = v_1.Args[1] 38441 v_1_0 := v_1.Args[0] 38442 if v_1_0.Op != OpAMD64SHLL { 38443 break 38444 } 38445 _ = v_1_0.Args[1] 38446 v_1_0_0 := v_1_0.Args[0] 38447 if v_1_0_0.Op != OpAMD64MOVLconst { 38448 break 38449 } 38450 if v_1_0_0.AuxInt != 1 { 38451 break 38452 } 38453 x := v_1_0.Args[1] 38454 y := v_1.Args[1] 38455 mem := v.Args[2] 38456 if !(!config.nacl) { 38457 break 38458 } 38459 v.reset(OpAMD64SETBmem) 38460 v.AuxInt = off 38461 v.Aux = sym 38462 v.AddArg(ptr) 38463 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 38464 v0.AddArg(x) 38465 v0.AddArg(y) 38466 v.AddArg(v0) 38467 v.AddArg(mem) 38468 return true 38469 } 38470 // match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) 38471 // cond: !config.nacl 38472 // result: (SETBmem [off] {sym} ptr (BTL x y) mem) 38473 for { 38474 off := v.AuxInt 38475 sym := v.Aux 38476 _ = v.Args[2] 38477 ptr := v.Args[0] 38478 v_1 := v.Args[1] 38479 if v_1.Op != OpAMD64TESTL { 38480 break 38481 } 38482 _ = v_1.Args[1] 38483 y := 
v_1.Args[0] 38484 v_1_1 := v_1.Args[1] 38485 if v_1_1.Op != OpAMD64SHLL { 38486 break 38487 } 38488 _ = v_1_1.Args[1] 38489 v_1_1_0 := v_1_1.Args[0] 38490 if v_1_1_0.Op != OpAMD64MOVLconst { 38491 break 38492 } 38493 if v_1_1_0.AuxInt != 1 { 38494 break 38495 } 38496 x := v_1_1.Args[1] 38497 mem := v.Args[2] 38498 if !(!config.nacl) { 38499 break 38500 } 38501 v.reset(OpAMD64SETBmem) 38502 v.AuxInt = off 38503 v.Aux = sym 38504 v.AddArg(ptr) 38505 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 38506 v0.AddArg(x) 38507 v0.AddArg(y) 38508 v.AddArg(v0) 38509 v.AddArg(mem) 38510 return true 38511 } 38512 // match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) 38513 // cond: !config.nacl 38514 // result: (SETBmem [off] {sym} ptr (BTQ x y) mem) 38515 for { 38516 off := v.AuxInt 38517 sym := v.Aux 38518 _ = v.Args[2] 38519 ptr := v.Args[0] 38520 v_1 := v.Args[1] 38521 if v_1.Op != OpAMD64TESTQ { 38522 break 38523 } 38524 _ = v_1.Args[1] 38525 v_1_0 := v_1.Args[0] 38526 if v_1_0.Op != OpAMD64SHLQ { 38527 break 38528 } 38529 _ = v_1_0.Args[1] 38530 v_1_0_0 := v_1_0.Args[0] 38531 if v_1_0_0.Op != OpAMD64MOVQconst { 38532 break 38533 } 38534 if v_1_0_0.AuxInt != 1 { 38535 break 38536 } 38537 x := v_1_0.Args[1] 38538 y := v_1.Args[1] 38539 mem := v.Args[2] 38540 if !(!config.nacl) { 38541 break 38542 } 38543 v.reset(OpAMD64SETBmem) 38544 v.AuxInt = off 38545 v.Aux = sym 38546 v.AddArg(ptr) 38547 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 38548 v0.AddArg(x) 38549 v0.AddArg(y) 38550 v.AddArg(v0) 38551 v.AddArg(mem) 38552 return true 38553 } 38554 // match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) 38555 // cond: !config.nacl 38556 // result: (SETBmem [off] {sym} ptr (BTQ x y) mem) 38557 for { 38558 off := v.AuxInt 38559 sym := v.Aux 38560 _ = v.Args[2] 38561 ptr := v.Args[0] 38562 v_1 := v.Args[1] 38563 if v_1.Op != OpAMD64TESTQ { 38564 break 38565 } 38566 _ = v_1.Args[1] 38567 y := v_1.Args[0] 38568 v_1_1 := v_1.Args[1] 38569 if v_1_1.Op != OpAMD64SHLQ { 38570 break 38571 } 38572 _ = v_1_1.Args[1] 38573 v_1_1_0 := v_1_1.Args[0] 38574 if v_1_1_0.Op != OpAMD64MOVQconst { 38575 break 38576 } 38577 if v_1_1_0.AuxInt != 1 { 38578 break 38579 } 38580 x := v_1_1.Args[1] 38581 mem := v.Args[2] 38582 if !(!config.nacl) { 38583 break 38584 } 38585 v.reset(OpAMD64SETBmem) 38586 v.AuxInt = off 38587 v.Aux = sym 38588 v.AddArg(ptr) 38589 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 38590 v0.AddArg(x) 38591 v0.AddArg(y) 38592 v.AddArg(v0) 38593 v.AddArg(mem) 38594 return true 38595 } 38596 // match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem) 38597 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 38598 // result: (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) 38599 for { 38600 off := v.AuxInt 38601 sym := v.Aux 38602 _ = v.Args[2] 38603 ptr := v.Args[0] 38604 v_1 := v.Args[1] 38605 if v_1.Op != OpAMD64TESTLconst { 38606 break 38607 } 38608 c := v_1.AuxInt 38609 x := v_1.Args[0] 38610 mem := v.Args[2] 38611 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 38612 break 38613 } 38614 v.reset(OpAMD64SETBmem) 38615 v.AuxInt = off 38616 v.Aux = sym 38617 v.AddArg(ptr) 38618 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 38619 v0.AuxInt = log2(c) 38620 v0.AddArg(x) 38621 v.AddArg(v0) 38622 v.AddArg(mem) 38623 return true 38624 } 38625 // match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem) 38626 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38627 // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] 
x) mem) 38628 for { 38629 off := v.AuxInt 38630 sym := v.Aux 38631 _ = v.Args[2] 38632 ptr := v.Args[0] 38633 v_1 := v.Args[1] 38634 if v_1.Op != OpAMD64TESTQconst { 38635 break 38636 } 38637 c := v_1.AuxInt 38638 x := v_1.Args[0] 38639 mem := v.Args[2] 38640 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38641 break 38642 } 38643 v.reset(OpAMD64SETBmem) 38644 v.AuxInt = off 38645 v.Aux = sym 38646 v.AddArg(ptr) 38647 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38648 v0.AuxInt = log2(c) 38649 v0.AddArg(x) 38650 v.AddArg(v0) 38651 v.AddArg(mem) 38652 return true 38653 } 38654 // match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) 38655 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38656 // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) 38657 for { 38658 off := v.AuxInt 38659 sym := v.Aux 38660 _ = v.Args[2] 38661 ptr := v.Args[0] 38662 v_1 := v.Args[1] 38663 if v_1.Op != OpAMD64TESTQ { 38664 break 38665 } 38666 _ = v_1.Args[1] 38667 v_1_0 := v_1.Args[0] 38668 if v_1_0.Op != OpAMD64MOVQconst { 38669 break 38670 } 38671 c := v_1_0.AuxInt 38672 x := v_1.Args[1] 38673 mem := v.Args[2] 38674 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38675 break 38676 } 38677 v.reset(OpAMD64SETBmem) 38678 v.AuxInt = off 38679 v.Aux = sym 38680 v.AddArg(ptr) 38681 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38682 v0.AuxInt = log2(c) 38683 v0.AddArg(x) 38684 v.AddArg(v0) 38685 v.AddArg(mem) 38686 return true 38687 } 38688 // match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) 38689 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 38690 // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) 38691 for { 38692 off := v.AuxInt 38693 sym := v.Aux 38694 _ = v.Args[2] 38695 ptr := v.Args[0] 38696 v_1 := v.Args[1] 38697 if v_1.Op != OpAMD64TESTQ { 38698 break 38699 } 38700 _ = v_1.Args[1] 38701 x := v_1.Args[0] 38702 v_1_1 := v_1.Args[1] 38703 if v_1_1.Op != OpAMD64MOVQconst { 38704 break 38705 } 38706 c := v_1_1.AuxInt 38707 mem := v.Args[2] 38708 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 38709 break 38710 } 38711 v.reset(OpAMD64SETBmem) 38712 v.AuxInt = off 38713 v.Aux = sym 38714 v.AddArg(ptr) 38715 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 38716 v0.AuxInt = log2(c) 38717 v0.AddArg(x) 38718 v.AddArg(v0) 38719 v.AddArg(mem) 38720 return true 38721 } 38722 // match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem) 38723 // cond: 38724 // result: (SETNEmem [off] {sym} ptr x mem) 38725 for { 38726 off := v.AuxInt 38727 sym := v.Aux 38728 _ = v.Args[2] 38729 ptr := v.Args[0] 38730 v_1 := v.Args[1] 38731 if v_1.Op != OpAMD64InvertFlags { 38732 break 38733 } 38734 x := v_1.Args[0] 38735 mem := v.Args[2] 38736 v.reset(OpAMD64SETNEmem) 38737 v.AuxInt = off 38738 v.Aux = sym 38739 v.AddArg(ptr) 38740 v.AddArg(x) 38741 v.AddArg(mem) 38742 return true 38743 } 38744 // match: (SETNEmem [off] {sym} ptr x:(FlagEQ) mem) 38745 // cond: 38746 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem) 38747 for { 38748 off := v.AuxInt 38749 sym := v.Aux 38750 _ = v.Args[2] 38751 ptr := v.Args[0] 38752 x := v.Args[1] 38753 if x.Op != OpAMD64FlagEQ { 38754 break 38755 } 38756 mem := v.Args[2] 38757 v.reset(OpAMD64MOVBstore) 38758 v.AuxInt = off 38759 v.Aux = sym 38760 v.AddArg(ptr) 38761 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38762 v0.AuxInt = 0 38763 v.AddArg(v0) 38764 v.AddArg(mem) 38765 return true 38766 } 38767 return false 38768 } 38769 func 
rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool { 38770 b := v.Block 38771 _ = b 38772 // match: (SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem) 38773 // cond: 38774 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38775 for { 38776 off := v.AuxInt 38777 sym := v.Aux 38778 _ = v.Args[2] 38779 ptr := v.Args[0] 38780 x := v.Args[1] 38781 if x.Op != OpAMD64FlagLT_ULT { 38782 break 38783 } 38784 mem := v.Args[2] 38785 v.reset(OpAMD64MOVBstore) 38786 v.AuxInt = off 38787 v.Aux = sym 38788 v.AddArg(ptr) 38789 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38790 v0.AuxInt = 1 38791 v.AddArg(v0) 38792 v.AddArg(mem) 38793 return true 38794 } 38795 // match: (SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem) 38796 // cond: 38797 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38798 for { 38799 off := v.AuxInt 38800 sym := v.Aux 38801 _ = v.Args[2] 38802 ptr := v.Args[0] 38803 x := v.Args[1] 38804 if x.Op != OpAMD64FlagLT_UGT { 38805 break 38806 } 38807 mem := v.Args[2] 38808 v.reset(OpAMD64MOVBstore) 38809 v.AuxInt = off 38810 v.Aux = sym 38811 v.AddArg(ptr) 38812 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38813 v0.AuxInt = 1 38814 v.AddArg(v0) 38815 v.AddArg(mem) 38816 return true 38817 } 38818 // match: (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem) 38819 // cond: 38820 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38821 for { 38822 off := v.AuxInt 38823 sym := v.Aux 38824 _ = v.Args[2] 38825 ptr := v.Args[0] 38826 x := v.Args[1] 38827 if x.Op != OpAMD64FlagGT_ULT { 38828 break 38829 } 38830 mem := v.Args[2] 38831 v.reset(OpAMD64MOVBstore) 38832 v.AuxInt = off 38833 v.Aux = sym 38834 v.AddArg(ptr) 38835 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38836 v0.AuxInt = 1 38837 v.AddArg(v0) 38838 v.AddArg(mem) 38839 return true 38840 } 38841 // match: (SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem) 38842 // cond: 38843 // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem) 38844 for { 38845 off := v.AuxInt 38846 sym := v.Aux 38847 _ = v.Args[2] 38848 ptr := v.Args[0] 38849 x := v.Args[1] 38850 if x.Op != OpAMD64FlagGT_UGT { 38851 break 38852 } 38853 mem := v.Args[2] 38854 v.reset(OpAMD64MOVBstore) 38855 v.AuxInt = off 38856 v.Aux = sym 38857 v.AddArg(ptr) 38858 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 38859 v0.AuxInt = 1 38860 v.AddArg(v0) 38861 v.AddArg(mem) 38862 return true 38863 } 38864 return false 38865 } 38866 func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { 38867 b := v.Block 38868 _ = b 38869 // match: (SHLL x (MOVQconst [c])) 38870 // cond: 38871 // result: (SHLLconst [c&31] x) 38872 for { 38873 _ = v.Args[1] 38874 x := v.Args[0] 38875 v_1 := v.Args[1] 38876 if v_1.Op != OpAMD64MOVQconst { 38877 break 38878 } 38879 c := v_1.AuxInt 38880 v.reset(OpAMD64SHLLconst) 38881 v.AuxInt = c & 31 38882 v.AddArg(x) 38883 return true 38884 } 38885 // match: (SHLL x (MOVLconst [c])) 38886 // cond: 38887 // result: (SHLLconst [c&31] x) 38888 for { 38889 _ = v.Args[1] 38890 x := v.Args[0] 38891 v_1 := v.Args[1] 38892 if v_1.Op != OpAMD64MOVLconst { 38893 break 38894 } 38895 c := v_1.AuxInt 38896 v.reset(OpAMD64SHLLconst) 38897 v.AuxInt = c & 31 38898 v.AddArg(x) 38899 return true 38900 } 38901 // match: (SHLL x (ADDQconst [c] y)) 38902 // cond: c & 31 == 0 38903 // result: (SHLL x y) 38904 for { 38905 _ = v.Args[1] 38906 x := v.Args[0] 38907 v_1 := v.Args[1] 38908 if v_1.Op != OpAMD64ADDQconst { 38909 break 38910 } 38911 c := v_1.AuxInt 38912 y := v_1.Args[0] 38913 if !(c&31 == 0) { 38914 break 38915 } 38916 
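// These count simplifications rely on amd64 shift semantics: a 32-bit SHL/SHR
// uses only the low five bits of CL, so adding a multiple of 32 to the count
// (c&31 == 0) or masking it with a value that keeps the low five bits intact
// (c&31 == 31) cannot change the result, and the ADD/AND is dropped:
//   (SHLL x (ADDQconst [64] y)) => (SHLL x y)   // 64&31 == 0
//   (SHLL x (ANDLconst [31] y)) => (SHLL x y)   // 31&31 == 31
// The SHLQ rules below are identical with 63 in place of 31, matching the
// six-bit count mask of 64-bit shifts.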
v.reset(OpAMD64SHLL) 38917 v.AddArg(x) 38918 v.AddArg(y) 38919 return true 38920 } 38921 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y))) 38922 // cond: c & 31 == 0 38923 // result: (SHLL x (NEGQ <t> y)) 38924 for { 38925 _ = v.Args[1] 38926 x := v.Args[0] 38927 v_1 := v.Args[1] 38928 if v_1.Op != OpAMD64NEGQ { 38929 break 38930 } 38931 t := v_1.Type 38932 v_1_0 := v_1.Args[0] 38933 if v_1_0.Op != OpAMD64ADDQconst { 38934 break 38935 } 38936 c := v_1_0.AuxInt 38937 y := v_1_0.Args[0] 38938 if !(c&31 == 0) { 38939 break 38940 } 38941 v.reset(OpAMD64SHLL) 38942 v.AddArg(x) 38943 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 38944 v0.AddArg(y) 38945 v.AddArg(v0) 38946 return true 38947 } 38948 // match: (SHLL x (ANDQconst [c] y)) 38949 // cond: c & 31 == 31 38950 // result: (SHLL x y) 38951 for { 38952 _ = v.Args[1] 38953 x := v.Args[0] 38954 v_1 := v.Args[1] 38955 if v_1.Op != OpAMD64ANDQconst { 38956 break 38957 } 38958 c := v_1.AuxInt 38959 y := v_1.Args[0] 38960 if !(c&31 == 31) { 38961 break 38962 } 38963 v.reset(OpAMD64SHLL) 38964 v.AddArg(x) 38965 v.AddArg(y) 38966 return true 38967 } 38968 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y))) 38969 // cond: c & 31 == 31 38970 // result: (SHLL x (NEGQ <t> y)) 38971 for { 38972 _ = v.Args[1] 38973 x := v.Args[0] 38974 v_1 := v.Args[1] 38975 if v_1.Op != OpAMD64NEGQ { 38976 break 38977 } 38978 t := v_1.Type 38979 v_1_0 := v_1.Args[0] 38980 if v_1_0.Op != OpAMD64ANDQconst { 38981 break 38982 } 38983 c := v_1_0.AuxInt 38984 y := v_1_0.Args[0] 38985 if !(c&31 == 31) { 38986 break 38987 } 38988 v.reset(OpAMD64SHLL) 38989 v.AddArg(x) 38990 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 38991 v0.AddArg(y) 38992 v.AddArg(v0) 38993 return true 38994 } 38995 // match: (SHLL x (ADDLconst [c] y)) 38996 // cond: c & 31 == 0 38997 // result: (SHLL x y) 38998 for { 38999 _ = v.Args[1] 39000 x := v.Args[0] 39001 v_1 := v.Args[1] 39002 if v_1.Op != OpAMD64ADDLconst { 39003 break 39004 } 39005 c := v_1.AuxInt 39006 y := v_1.Args[0] 39007 if !(c&31 == 0) { 39008 break 39009 } 39010 v.reset(OpAMD64SHLL) 39011 v.AddArg(x) 39012 v.AddArg(y) 39013 return true 39014 } 39015 // match: (SHLL x (NEGL <t> (ADDLconst [c] y))) 39016 // cond: c & 31 == 0 39017 // result: (SHLL x (NEGL <t> y)) 39018 for { 39019 _ = v.Args[1] 39020 x := v.Args[0] 39021 v_1 := v.Args[1] 39022 if v_1.Op != OpAMD64NEGL { 39023 break 39024 } 39025 t := v_1.Type 39026 v_1_0 := v_1.Args[0] 39027 if v_1_0.Op != OpAMD64ADDLconst { 39028 break 39029 } 39030 c := v_1_0.AuxInt 39031 y := v_1_0.Args[0] 39032 if !(c&31 == 0) { 39033 break 39034 } 39035 v.reset(OpAMD64SHLL) 39036 v.AddArg(x) 39037 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39038 v0.AddArg(y) 39039 v.AddArg(v0) 39040 return true 39041 } 39042 // match: (SHLL x (ANDLconst [c] y)) 39043 // cond: c & 31 == 31 39044 // result: (SHLL x y) 39045 for { 39046 _ = v.Args[1] 39047 x := v.Args[0] 39048 v_1 := v.Args[1] 39049 if v_1.Op != OpAMD64ANDLconst { 39050 break 39051 } 39052 c := v_1.AuxInt 39053 y := v_1.Args[0] 39054 if !(c&31 == 31) { 39055 break 39056 } 39057 v.reset(OpAMD64SHLL) 39058 v.AddArg(x) 39059 v.AddArg(y) 39060 return true 39061 } 39062 // match: (SHLL x (NEGL <t> (ANDLconst [c] y))) 39063 // cond: c & 31 == 31 39064 // result: (SHLL x (NEGL <t> y)) 39065 for { 39066 _ = v.Args[1] 39067 x := v.Args[0] 39068 v_1 := v.Args[1] 39069 if v_1.Op != OpAMD64NEGL { 39070 break 39071 } 39072 t := v_1.Type 39073 v_1_0 := v_1.Args[0] 39074 if v_1_0.Op != OpAMD64ANDLconst { 39075 break 39076 } 39077 c := v_1_0.AuxInt 39078 y := v_1_0.Args[0] 39079 if 
!(c&31 == 31) { 39080 break 39081 } 39082 v.reset(OpAMD64SHLL) 39083 v.AddArg(x) 39084 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39085 v0.AddArg(y) 39086 v.AddArg(v0) 39087 return true 39088 } 39089 return false 39090 } 39091 func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool { 39092 // match: (SHLLconst x [0]) 39093 // cond: 39094 // result: x 39095 for { 39096 if v.AuxInt != 0 { 39097 break 39098 } 39099 x := v.Args[0] 39100 v.reset(OpCopy) 39101 v.Type = x.Type 39102 v.AddArg(x) 39103 return true 39104 } 39105 return false 39106 } 39107 func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool { 39108 b := v.Block 39109 _ = b 39110 // match: (SHLQ x (MOVQconst [c])) 39111 // cond: 39112 // result: (SHLQconst [c&63] x) 39113 for { 39114 _ = v.Args[1] 39115 x := v.Args[0] 39116 v_1 := v.Args[1] 39117 if v_1.Op != OpAMD64MOVQconst { 39118 break 39119 } 39120 c := v_1.AuxInt 39121 v.reset(OpAMD64SHLQconst) 39122 v.AuxInt = c & 63 39123 v.AddArg(x) 39124 return true 39125 } 39126 // match: (SHLQ x (MOVLconst [c])) 39127 // cond: 39128 // result: (SHLQconst [c&63] x) 39129 for { 39130 _ = v.Args[1] 39131 x := v.Args[0] 39132 v_1 := v.Args[1] 39133 if v_1.Op != OpAMD64MOVLconst { 39134 break 39135 } 39136 c := v_1.AuxInt 39137 v.reset(OpAMD64SHLQconst) 39138 v.AuxInt = c & 63 39139 v.AddArg(x) 39140 return true 39141 } 39142 // match: (SHLQ x (ADDQconst [c] y)) 39143 // cond: c & 63 == 0 39144 // result: (SHLQ x y) 39145 for { 39146 _ = v.Args[1] 39147 x := v.Args[0] 39148 v_1 := v.Args[1] 39149 if v_1.Op != OpAMD64ADDQconst { 39150 break 39151 } 39152 c := v_1.AuxInt 39153 y := v_1.Args[0] 39154 if !(c&63 == 0) { 39155 break 39156 } 39157 v.reset(OpAMD64SHLQ) 39158 v.AddArg(x) 39159 v.AddArg(y) 39160 return true 39161 } 39162 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y))) 39163 // cond: c & 63 == 0 39164 // result: (SHLQ x (NEGQ <t> y)) 39165 for { 39166 _ = v.Args[1] 39167 x := v.Args[0] 39168 v_1 := v.Args[1] 39169 if v_1.Op != OpAMD64NEGQ { 39170 break 39171 } 39172 t := v_1.Type 39173 v_1_0 := v_1.Args[0] 39174 if v_1_0.Op != OpAMD64ADDQconst { 39175 break 39176 } 39177 c := v_1_0.AuxInt 39178 y := v_1_0.Args[0] 39179 if !(c&63 == 0) { 39180 break 39181 } 39182 v.reset(OpAMD64SHLQ) 39183 v.AddArg(x) 39184 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39185 v0.AddArg(y) 39186 v.AddArg(v0) 39187 return true 39188 } 39189 // match: (SHLQ x (ANDQconst [c] y)) 39190 // cond: c & 63 == 63 39191 // result: (SHLQ x y) 39192 for { 39193 _ = v.Args[1] 39194 x := v.Args[0] 39195 v_1 := v.Args[1] 39196 if v_1.Op != OpAMD64ANDQconst { 39197 break 39198 } 39199 c := v_1.AuxInt 39200 y := v_1.Args[0] 39201 if !(c&63 == 63) { 39202 break 39203 } 39204 v.reset(OpAMD64SHLQ) 39205 v.AddArg(x) 39206 v.AddArg(y) 39207 return true 39208 } 39209 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y))) 39210 // cond: c & 63 == 63 39211 // result: (SHLQ x (NEGQ <t> y)) 39212 for { 39213 _ = v.Args[1] 39214 x := v.Args[0] 39215 v_1 := v.Args[1] 39216 if v_1.Op != OpAMD64NEGQ { 39217 break 39218 } 39219 t := v_1.Type 39220 v_1_0 := v_1.Args[0] 39221 if v_1_0.Op != OpAMD64ANDQconst { 39222 break 39223 } 39224 c := v_1_0.AuxInt 39225 y := v_1_0.Args[0] 39226 if !(c&63 == 63) { 39227 break 39228 } 39229 v.reset(OpAMD64SHLQ) 39230 v.AddArg(x) 39231 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39232 v0.AddArg(y) 39233 v.AddArg(v0) 39234 return true 39235 } 39236 // match: (SHLQ x (ADDLconst [c] y)) 39237 // cond: c & 63 == 0 39238 // result: (SHLQ x y) 39239 for { 39240 _ = v.Args[1] 39241 x := v.Args[0] 39242 v_1 := v.Args[1] 39243 if 
v_1.Op != OpAMD64ADDLconst { 39244 break 39245 } 39246 c := v_1.AuxInt 39247 y := v_1.Args[0] 39248 if !(c&63 == 0) { 39249 break 39250 } 39251 v.reset(OpAMD64SHLQ) 39252 v.AddArg(x) 39253 v.AddArg(y) 39254 return true 39255 } 39256 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y))) 39257 // cond: c & 63 == 0 39258 // result: (SHLQ x (NEGL <t> y)) 39259 for { 39260 _ = v.Args[1] 39261 x := v.Args[0] 39262 v_1 := v.Args[1] 39263 if v_1.Op != OpAMD64NEGL { 39264 break 39265 } 39266 t := v_1.Type 39267 v_1_0 := v_1.Args[0] 39268 if v_1_0.Op != OpAMD64ADDLconst { 39269 break 39270 } 39271 c := v_1_0.AuxInt 39272 y := v_1_0.Args[0] 39273 if !(c&63 == 0) { 39274 break 39275 } 39276 v.reset(OpAMD64SHLQ) 39277 v.AddArg(x) 39278 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39279 v0.AddArg(y) 39280 v.AddArg(v0) 39281 return true 39282 } 39283 // match: (SHLQ x (ANDLconst [c] y)) 39284 // cond: c & 63 == 63 39285 // result: (SHLQ x y) 39286 for { 39287 _ = v.Args[1] 39288 x := v.Args[0] 39289 v_1 := v.Args[1] 39290 if v_1.Op != OpAMD64ANDLconst { 39291 break 39292 } 39293 c := v_1.AuxInt 39294 y := v_1.Args[0] 39295 if !(c&63 == 63) { 39296 break 39297 } 39298 v.reset(OpAMD64SHLQ) 39299 v.AddArg(x) 39300 v.AddArg(y) 39301 return true 39302 } 39303 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y))) 39304 // cond: c & 63 == 63 39305 // result: (SHLQ x (NEGL <t> y)) 39306 for { 39307 _ = v.Args[1] 39308 x := v.Args[0] 39309 v_1 := v.Args[1] 39310 if v_1.Op != OpAMD64NEGL { 39311 break 39312 } 39313 t := v_1.Type 39314 v_1_0 := v_1.Args[0] 39315 if v_1_0.Op != OpAMD64ANDLconst { 39316 break 39317 } 39318 c := v_1_0.AuxInt 39319 y := v_1_0.Args[0] 39320 if !(c&63 == 63) { 39321 break 39322 } 39323 v.reset(OpAMD64SHLQ) 39324 v.AddArg(x) 39325 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39326 v0.AddArg(y) 39327 v.AddArg(v0) 39328 return true 39329 } 39330 return false 39331 } 39332 func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool { 39333 // match: (SHLQconst x [0]) 39334 // cond: 39335 // result: x 39336 for { 39337 if v.AuxInt != 0 { 39338 break 39339 } 39340 x := v.Args[0] 39341 v.reset(OpCopy) 39342 v.Type = x.Type 39343 v.AddArg(x) 39344 return true 39345 } 39346 return false 39347 } 39348 func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool { 39349 // match: (SHRB x (MOVQconst [c])) 39350 // cond: c&31 < 8 39351 // result: (SHRBconst [c&31] x) 39352 for { 39353 _ = v.Args[1] 39354 x := v.Args[0] 39355 v_1 := v.Args[1] 39356 if v_1.Op != OpAMD64MOVQconst { 39357 break 39358 } 39359 c := v_1.AuxInt 39360 if !(c&31 < 8) { 39361 break 39362 } 39363 v.reset(OpAMD64SHRBconst) 39364 v.AuxInt = c & 31 39365 v.AddArg(x) 39366 return true 39367 } 39368 // match: (SHRB x (MOVLconst [c])) 39369 // cond: c&31 < 8 39370 // result: (SHRBconst [c&31] x) 39371 for { 39372 _ = v.Args[1] 39373 x := v.Args[0] 39374 v_1 := v.Args[1] 39375 if v_1.Op != OpAMD64MOVLconst { 39376 break 39377 } 39378 c := v_1.AuxInt 39379 if !(c&31 < 8) { 39380 break 39381 } 39382 v.reset(OpAMD64SHRBconst) 39383 v.AuxInt = c & 31 39384 v.AddArg(x) 39385 return true 39386 } 39387 // match: (SHRB _ (MOVQconst [c])) 39388 // cond: c&31 >= 8 39389 // result: (MOVLconst [0]) 39390 for { 39391 _ = v.Args[1] 39392 v_1 := v.Args[1] 39393 if v_1.Op != OpAMD64MOVQconst { 39394 break 39395 } 39396 c := v_1.AuxInt 39397 if !(c&31 >= 8) { 39398 break 39399 } 39400 v.reset(OpAMD64MOVLconst) 39401 v.AuxInt = 0 39402 return true 39403 } 39404 // match: (SHRB _ (MOVLconst [c])) 39405 // cond: c&31 >= 8 39406 // result: (MOVLconst [0]) 39407 for { 39408 _ = v.Args[1] 
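// Byte shifts get different treatment from the 32/64-bit ones: the hardware
// still masks the count to five bits, but five bits can exceed the 8-bit
// operand width, so a constant count is folded into SHRBconst only when
// c&31 < 8; a (masked) count of 8 or more shifts every bit out and the whole
// expression folds to zero instead:
//   (SHRB x (MOVLconst [3]))  => (SHRBconst [3] x)
//   (SHRB _ (MOVLconst [12])) => (MOVLconst [0])
// The SHRW rules later in the file do the same with a width of 16.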
39409 v_1 := v.Args[1] 39410 if v_1.Op != OpAMD64MOVLconst { 39411 break 39412 } 39413 c := v_1.AuxInt 39414 if !(c&31 >= 8) { 39415 break 39416 } 39417 v.reset(OpAMD64MOVLconst) 39418 v.AuxInt = 0 39419 return true 39420 } 39421 return false 39422 } 39423 func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool { 39424 // match: (SHRBconst x [0]) 39425 // cond: 39426 // result: x 39427 for { 39428 if v.AuxInt != 0 { 39429 break 39430 } 39431 x := v.Args[0] 39432 v.reset(OpCopy) 39433 v.Type = x.Type 39434 v.AddArg(x) 39435 return true 39436 } 39437 return false 39438 } 39439 func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool { 39440 b := v.Block 39441 _ = b 39442 // match: (SHRL x (MOVQconst [c])) 39443 // cond: 39444 // result: (SHRLconst [c&31] x) 39445 for { 39446 _ = v.Args[1] 39447 x := v.Args[0] 39448 v_1 := v.Args[1] 39449 if v_1.Op != OpAMD64MOVQconst { 39450 break 39451 } 39452 c := v_1.AuxInt 39453 v.reset(OpAMD64SHRLconst) 39454 v.AuxInt = c & 31 39455 v.AddArg(x) 39456 return true 39457 } 39458 // match: (SHRL x (MOVLconst [c])) 39459 // cond: 39460 // result: (SHRLconst [c&31] x) 39461 for { 39462 _ = v.Args[1] 39463 x := v.Args[0] 39464 v_1 := v.Args[1] 39465 if v_1.Op != OpAMD64MOVLconst { 39466 break 39467 } 39468 c := v_1.AuxInt 39469 v.reset(OpAMD64SHRLconst) 39470 v.AuxInt = c & 31 39471 v.AddArg(x) 39472 return true 39473 } 39474 // match: (SHRL x (ADDQconst [c] y)) 39475 // cond: c & 31 == 0 39476 // result: (SHRL x y) 39477 for { 39478 _ = v.Args[1] 39479 x := v.Args[0] 39480 v_1 := v.Args[1] 39481 if v_1.Op != OpAMD64ADDQconst { 39482 break 39483 } 39484 c := v_1.AuxInt 39485 y := v_1.Args[0] 39486 if !(c&31 == 0) { 39487 break 39488 } 39489 v.reset(OpAMD64SHRL) 39490 v.AddArg(x) 39491 v.AddArg(y) 39492 return true 39493 } 39494 // match: (SHRL x (NEGQ <t> (ADDQconst [c] y))) 39495 // cond: c & 31 == 0 39496 // result: (SHRL x (NEGQ <t> y)) 39497 for { 39498 _ = v.Args[1] 39499 x := v.Args[0] 39500 v_1 := v.Args[1] 39501 if v_1.Op != OpAMD64NEGQ { 39502 break 39503 } 39504 t := v_1.Type 39505 v_1_0 := v_1.Args[0] 39506 if v_1_0.Op != OpAMD64ADDQconst { 39507 break 39508 } 39509 c := v_1_0.AuxInt 39510 y := v_1_0.Args[0] 39511 if !(c&31 == 0) { 39512 break 39513 } 39514 v.reset(OpAMD64SHRL) 39515 v.AddArg(x) 39516 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39517 v0.AddArg(y) 39518 v.AddArg(v0) 39519 return true 39520 } 39521 // match: (SHRL x (ANDQconst [c] y)) 39522 // cond: c & 31 == 31 39523 // result: (SHRL x y) 39524 for { 39525 _ = v.Args[1] 39526 x := v.Args[0] 39527 v_1 := v.Args[1] 39528 if v_1.Op != OpAMD64ANDQconst { 39529 break 39530 } 39531 c := v_1.AuxInt 39532 y := v_1.Args[0] 39533 if !(c&31 == 31) { 39534 break 39535 } 39536 v.reset(OpAMD64SHRL) 39537 v.AddArg(x) 39538 v.AddArg(y) 39539 return true 39540 } 39541 // match: (SHRL x (NEGQ <t> (ANDQconst [c] y))) 39542 // cond: c & 31 == 31 39543 // result: (SHRL x (NEGQ <t> y)) 39544 for { 39545 _ = v.Args[1] 39546 x := v.Args[0] 39547 v_1 := v.Args[1] 39548 if v_1.Op != OpAMD64NEGQ { 39549 break 39550 } 39551 t := v_1.Type 39552 v_1_0 := v_1.Args[0] 39553 if v_1_0.Op != OpAMD64ANDQconst { 39554 break 39555 } 39556 c := v_1_0.AuxInt 39557 y := v_1_0.Args[0] 39558 if !(c&31 == 31) { 39559 break 39560 } 39561 v.reset(OpAMD64SHRL) 39562 v.AddArg(x) 39563 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39564 v0.AddArg(y) 39565 v.AddArg(v0) 39566 return true 39567 } 39568 // match: (SHRL x (ADDLconst [c] y)) 39569 // cond: c & 31 == 0 39570 // result: (SHRL x y) 39571 for { 39572 _ = v.Args[1] 39573 x := v.Args[0] 
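// The NEGQ/NEGL variants of these rules handle shift counts of the form
// -(y+c) or -(y&c); such counts typically come from rotate lowering, where one
// half of the rotate shifts by the negated count. The simplification is the
// same: only count bits 0-4 matter for a 32-bit shift, so for example
//   (SHRL x (NEGL <t> (ADDLconst [32] y))) => (SHRL x (NEGL <t> y))
// because (-(y+32))&31 == (-y)&31.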
39574 v_1 := v.Args[1] 39575 if v_1.Op != OpAMD64ADDLconst { 39576 break 39577 } 39578 c := v_1.AuxInt 39579 y := v_1.Args[0] 39580 if !(c&31 == 0) { 39581 break 39582 } 39583 v.reset(OpAMD64SHRL) 39584 v.AddArg(x) 39585 v.AddArg(y) 39586 return true 39587 } 39588 // match: (SHRL x (NEGL <t> (ADDLconst [c] y))) 39589 // cond: c & 31 == 0 39590 // result: (SHRL x (NEGL <t> y)) 39591 for { 39592 _ = v.Args[1] 39593 x := v.Args[0] 39594 v_1 := v.Args[1] 39595 if v_1.Op != OpAMD64NEGL { 39596 break 39597 } 39598 t := v_1.Type 39599 v_1_0 := v_1.Args[0] 39600 if v_1_0.Op != OpAMD64ADDLconst { 39601 break 39602 } 39603 c := v_1_0.AuxInt 39604 y := v_1_0.Args[0] 39605 if !(c&31 == 0) { 39606 break 39607 } 39608 v.reset(OpAMD64SHRL) 39609 v.AddArg(x) 39610 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39611 v0.AddArg(y) 39612 v.AddArg(v0) 39613 return true 39614 } 39615 // match: (SHRL x (ANDLconst [c] y)) 39616 // cond: c & 31 == 31 39617 // result: (SHRL x y) 39618 for { 39619 _ = v.Args[1] 39620 x := v.Args[0] 39621 v_1 := v.Args[1] 39622 if v_1.Op != OpAMD64ANDLconst { 39623 break 39624 } 39625 c := v_1.AuxInt 39626 y := v_1.Args[0] 39627 if !(c&31 == 31) { 39628 break 39629 } 39630 v.reset(OpAMD64SHRL) 39631 v.AddArg(x) 39632 v.AddArg(y) 39633 return true 39634 } 39635 // match: (SHRL x (NEGL <t> (ANDLconst [c] y))) 39636 // cond: c & 31 == 31 39637 // result: (SHRL x (NEGL <t> y)) 39638 for { 39639 _ = v.Args[1] 39640 x := v.Args[0] 39641 v_1 := v.Args[1] 39642 if v_1.Op != OpAMD64NEGL { 39643 break 39644 } 39645 t := v_1.Type 39646 v_1_0 := v_1.Args[0] 39647 if v_1_0.Op != OpAMD64ANDLconst { 39648 break 39649 } 39650 c := v_1_0.AuxInt 39651 y := v_1_0.Args[0] 39652 if !(c&31 == 31) { 39653 break 39654 } 39655 v.reset(OpAMD64SHRL) 39656 v.AddArg(x) 39657 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39658 v0.AddArg(y) 39659 v.AddArg(v0) 39660 return true 39661 } 39662 return false 39663 } 39664 func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool { 39665 // match: (SHRLconst x [0]) 39666 // cond: 39667 // result: x 39668 for { 39669 if v.AuxInt != 0 { 39670 break 39671 } 39672 x := v.Args[0] 39673 v.reset(OpCopy) 39674 v.Type = x.Type 39675 v.AddArg(x) 39676 return true 39677 } 39678 return false 39679 } 39680 func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool { 39681 b := v.Block 39682 _ = b 39683 // match: (SHRQ x (MOVQconst [c])) 39684 // cond: 39685 // result: (SHRQconst [c&63] x) 39686 for { 39687 _ = v.Args[1] 39688 x := v.Args[0] 39689 v_1 := v.Args[1] 39690 if v_1.Op != OpAMD64MOVQconst { 39691 break 39692 } 39693 c := v_1.AuxInt 39694 v.reset(OpAMD64SHRQconst) 39695 v.AuxInt = c & 63 39696 v.AddArg(x) 39697 return true 39698 } 39699 // match: (SHRQ x (MOVLconst [c])) 39700 // cond: 39701 // result: (SHRQconst [c&63] x) 39702 for { 39703 _ = v.Args[1] 39704 x := v.Args[0] 39705 v_1 := v.Args[1] 39706 if v_1.Op != OpAMD64MOVLconst { 39707 break 39708 } 39709 c := v_1.AuxInt 39710 v.reset(OpAMD64SHRQconst) 39711 v.AuxInt = c & 63 39712 v.AddArg(x) 39713 return true 39714 } 39715 // match: (SHRQ x (ADDQconst [c] y)) 39716 // cond: c & 63 == 0 39717 // result: (SHRQ x y) 39718 for { 39719 _ = v.Args[1] 39720 x := v.Args[0] 39721 v_1 := v.Args[1] 39722 if v_1.Op != OpAMD64ADDQconst { 39723 break 39724 } 39725 c := v_1.AuxInt 39726 y := v_1.Args[0] 39727 if !(c&63 == 0) { 39728 break 39729 } 39730 v.reset(OpAMD64SHRQ) 39731 v.AddArg(x) 39732 v.AddArg(y) 39733 return true 39734 } 39735 // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y))) 39736 // cond: c & 63 == 0 39737 // result: (SHRQ x (NEGQ <t> 
y)) 39738 for { 39739 _ = v.Args[1] 39740 x := v.Args[0] 39741 v_1 := v.Args[1] 39742 if v_1.Op != OpAMD64NEGQ { 39743 break 39744 } 39745 t := v_1.Type 39746 v_1_0 := v_1.Args[0] 39747 if v_1_0.Op != OpAMD64ADDQconst { 39748 break 39749 } 39750 c := v_1_0.AuxInt 39751 y := v_1_0.Args[0] 39752 if !(c&63 == 0) { 39753 break 39754 } 39755 v.reset(OpAMD64SHRQ) 39756 v.AddArg(x) 39757 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39758 v0.AddArg(y) 39759 v.AddArg(v0) 39760 return true 39761 } 39762 // match: (SHRQ x (ANDQconst [c] y)) 39763 // cond: c & 63 == 63 39764 // result: (SHRQ x y) 39765 for { 39766 _ = v.Args[1] 39767 x := v.Args[0] 39768 v_1 := v.Args[1] 39769 if v_1.Op != OpAMD64ANDQconst { 39770 break 39771 } 39772 c := v_1.AuxInt 39773 y := v_1.Args[0] 39774 if !(c&63 == 63) { 39775 break 39776 } 39777 v.reset(OpAMD64SHRQ) 39778 v.AddArg(x) 39779 v.AddArg(y) 39780 return true 39781 } 39782 // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y))) 39783 // cond: c & 63 == 63 39784 // result: (SHRQ x (NEGQ <t> y)) 39785 for { 39786 _ = v.Args[1] 39787 x := v.Args[0] 39788 v_1 := v.Args[1] 39789 if v_1.Op != OpAMD64NEGQ { 39790 break 39791 } 39792 t := v_1.Type 39793 v_1_0 := v_1.Args[0] 39794 if v_1_0.Op != OpAMD64ANDQconst { 39795 break 39796 } 39797 c := v_1_0.AuxInt 39798 y := v_1_0.Args[0] 39799 if !(c&63 == 63) { 39800 break 39801 } 39802 v.reset(OpAMD64SHRQ) 39803 v.AddArg(x) 39804 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t) 39805 v0.AddArg(y) 39806 v.AddArg(v0) 39807 return true 39808 } 39809 // match: (SHRQ x (ADDLconst [c] y)) 39810 // cond: c & 63 == 0 39811 // result: (SHRQ x y) 39812 for { 39813 _ = v.Args[1] 39814 x := v.Args[0] 39815 v_1 := v.Args[1] 39816 if v_1.Op != OpAMD64ADDLconst { 39817 break 39818 } 39819 c := v_1.AuxInt 39820 y := v_1.Args[0] 39821 if !(c&63 == 0) { 39822 break 39823 } 39824 v.reset(OpAMD64SHRQ) 39825 v.AddArg(x) 39826 v.AddArg(y) 39827 return true 39828 } 39829 // match: (SHRQ x (NEGL <t> (ADDLconst [c] y))) 39830 // cond: c & 63 == 0 39831 // result: (SHRQ x (NEGL <t> y)) 39832 for { 39833 _ = v.Args[1] 39834 x := v.Args[0] 39835 v_1 := v.Args[1] 39836 if v_1.Op != OpAMD64NEGL { 39837 break 39838 } 39839 t := v_1.Type 39840 v_1_0 := v_1.Args[0] 39841 if v_1_0.Op != OpAMD64ADDLconst { 39842 break 39843 } 39844 c := v_1_0.AuxInt 39845 y := v_1_0.Args[0] 39846 if !(c&63 == 0) { 39847 break 39848 } 39849 v.reset(OpAMD64SHRQ) 39850 v.AddArg(x) 39851 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39852 v0.AddArg(y) 39853 v.AddArg(v0) 39854 return true 39855 } 39856 // match: (SHRQ x (ANDLconst [c] y)) 39857 // cond: c & 63 == 63 39858 // result: (SHRQ x y) 39859 for { 39860 _ = v.Args[1] 39861 x := v.Args[0] 39862 v_1 := v.Args[1] 39863 if v_1.Op != OpAMD64ANDLconst { 39864 break 39865 } 39866 c := v_1.AuxInt 39867 y := v_1.Args[0] 39868 if !(c&63 == 63) { 39869 break 39870 } 39871 v.reset(OpAMD64SHRQ) 39872 v.AddArg(x) 39873 v.AddArg(y) 39874 return true 39875 } 39876 // match: (SHRQ x (NEGL <t> (ANDLconst [c] y))) 39877 // cond: c & 63 == 63 39878 // result: (SHRQ x (NEGL <t> y)) 39879 for { 39880 _ = v.Args[1] 39881 x := v.Args[0] 39882 v_1 := v.Args[1] 39883 if v_1.Op != OpAMD64NEGL { 39884 break 39885 } 39886 t := v_1.Type 39887 v_1_0 := v_1.Args[0] 39888 if v_1_0.Op != OpAMD64ANDLconst { 39889 break 39890 } 39891 c := v_1_0.AuxInt 39892 y := v_1_0.Args[0] 39893 if !(c&63 == 63) { 39894 break 39895 } 39896 v.reset(OpAMD64SHRQ) 39897 v.AddArg(x) 39898 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t) 39899 v0.AddArg(y) 39900 v.AddArg(v0) 39901 return true 39902 } 39903 return 
false 39904 } 39905 func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool { 39906 // match: (SHRQconst x [0]) 39907 // cond: 39908 // result: x 39909 for { 39910 if v.AuxInt != 0 { 39911 break 39912 } 39913 x := v.Args[0] 39914 v.reset(OpCopy) 39915 v.Type = x.Type 39916 v.AddArg(x) 39917 return true 39918 } 39919 return false 39920 } 39921 func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool { 39922 // match: (SHRW x (MOVQconst [c])) 39923 // cond: c&31 < 16 39924 // result: (SHRWconst [c&31] x) 39925 for { 39926 _ = v.Args[1] 39927 x := v.Args[0] 39928 v_1 := v.Args[1] 39929 if v_1.Op != OpAMD64MOVQconst { 39930 break 39931 } 39932 c := v_1.AuxInt 39933 if !(c&31 < 16) { 39934 break 39935 } 39936 v.reset(OpAMD64SHRWconst) 39937 v.AuxInt = c & 31 39938 v.AddArg(x) 39939 return true 39940 } 39941 // match: (SHRW x (MOVLconst [c])) 39942 // cond: c&31 < 16 39943 // result: (SHRWconst [c&31] x) 39944 for { 39945 _ = v.Args[1] 39946 x := v.Args[0] 39947 v_1 := v.Args[1] 39948 if v_1.Op != OpAMD64MOVLconst { 39949 break 39950 } 39951 c := v_1.AuxInt 39952 if !(c&31 < 16) { 39953 break 39954 } 39955 v.reset(OpAMD64SHRWconst) 39956 v.AuxInt = c & 31 39957 v.AddArg(x) 39958 return true 39959 } 39960 // match: (SHRW _ (MOVQconst [c])) 39961 // cond: c&31 >= 16 39962 // result: (MOVLconst [0]) 39963 for { 39964 _ = v.Args[1] 39965 v_1 := v.Args[1] 39966 if v_1.Op != OpAMD64MOVQconst { 39967 break 39968 } 39969 c := v_1.AuxInt 39970 if !(c&31 >= 16) { 39971 break 39972 } 39973 v.reset(OpAMD64MOVLconst) 39974 v.AuxInt = 0 39975 return true 39976 } 39977 // match: (SHRW _ (MOVLconst [c])) 39978 // cond: c&31 >= 16 39979 // result: (MOVLconst [0]) 39980 for { 39981 _ = v.Args[1] 39982 v_1 := v.Args[1] 39983 if v_1.Op != OpAMD64MOVLconst { 39984 break 39985 } 39986 c := v_1.AuxInt 39987 if !(c&31 >= 16) { 39988 break 39989 } 39990 v.reset(OpAMD64MOVLconst) 39991 v.AuxInt = 0 39992 return true 39993 } 39994 return false 39995 } 39996 func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool { 39997 // match: (SHRWconst x [0]) 39998 // cond: 39999 // result: x 40000 for { 40001 if v.AuxInt != 0 { 40002 break 40003 } 40004 x := v.Args[0] 40005 v.reset(OpCopy) 40006 v.Type = x.Type 40007 v.AddArg(x) 40008 return true 40009 } 40010 return false 40011 } 40012 func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { 40013 b := v.Block 40014 _ = b 40015 // match: (SUBL x (MOVLconst [c])) 40016 // cond: 40017 // result: (SUBLconst x [c]) 40018 for { 40019 _ = v.Args[1] 40020 x := v.Args[0] 40021 v_1 := v.Args[1] 40022 if v_1.Op != OpAMD64MOVLconst { 40023 break 40024 } 40025 c := v_1.AuxInt 40026 v.reset(OpAMD64SUBLconst) 40027 v.AuxInt = c 40028 v.AddArg(x) 40029 return true 40030 } 40031 // match: (SUBL (MOVLconst [c]) x) 40032 // cond: 40033 // result: (NEGL (SUBLconst <v.Type> x [c])) 40034 for { 40035 _ = v.Args[1] 40036 v_0 := v.Args[0] 40037 if v_0.Op != OpAMD64MOVLconst { 40038 break 40039 } 40040 c := v_0.AuxInt 40041 x := v.Args[1] 40042 v.reset(OpAMD64NEGL) 40043 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type) 40044 v0.AuxInt = c 40045 v0.AddArg(x) 40046 v.AddArg(v0) 40047 return true 40048 } 40049 // match: (SUBL x x) 40050 // cond: 40051 // result: (MOVLconst [0]) 40052 for { 40053 _ = v.Args[1] 40054 x := v.Args[0] 40055 if x != v.Args[1] { 40056 break 40057 } 40058 v.reset(OpAMD64MOVLconst) 40059 v.AuxInt = 0 40060 return true 40061 } 40062 // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) 40063 // cond: canMergeLoad(v, l, x) && clobber(l) 40064 // result: (SUBLmem x [off] {sym} ptr mem) 40065 
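// Editorial sketch: the load-merging rule above folds a MOVLload whose only
// consumer is this SUBL into a single memory-operand instruction (SUBLmem),
// provided canMergeLoad verifies the load can be scheduled at the SUBL and
// clobber(l) marks the load dead for removal. A minimal Go function that
// exercises this shape (function name is illustrative, not from this file):
//
//	func subFromMem(x int32, p *int32) int32 {
//		return x - *p // candidate for SUBLmem: SUBL (ptr), reg
//	}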
for { 40066 _ = v.Args[1] 40067 x := v.Args[0] 40068 l := v.Args[1] 40069 if l.Op != OpAMD64MOVLload { 40070 break 40071 } 40072 off := l.AuxInt 40073 sym := l.Aux 40074 _ = l.Args[1] 40075 ptr := l.Args[0] 40076 mem := l.Args[1] 40077 if !(canMergeLoad(v, l, x) && clobber(l)) { 40078 break 40079 } 40080 v.reset(OpAMD64SUBLmem) 40081 v.AuxInt = off 40082 v.Aux = sym 40083 v.AddArg(x) 40084 v.AddArg(ptr) 40085 v.AddArg(mem) 40086 return true 40087 } 40088 return false 40089 } 40090 func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { 40091 // match: (SUBLconst [c] x) 40092 // cond: int32(c) == 0 40093 // result: x 40094 for { 40095 c := v.AuxInt 40096 x := v.Args[0] 40097 if !(int32(c) == 0) { 40098 break 40099 } 40100 v.reset(OpCopy) 40101 v.Type = x.Type 40102 v.AddArg(x) 40103 return true 40104 } 40105 // match: (SUBLconst [c] x) 40106 // cond: 40107 // result: (ADDLconst [int64(int32(-c))] x) 40108 for { 40109 c := v.AuxInt 40110 x := v.Args[0] 40111 v.reset(OpAMD64ADDLconst) 40112 v.AuxInt = int64(int32(-c)) 40113 v.AddArg(x) 40114 return true 40115 } 40116 } 40117 func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool { 40118 b := v.Block 40119 _ = b 40120 typ := &b.Func.Config.Types 40121 _ = typ 40122 // match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 40123 // cond: 40124 // result: (SUBL x (MOVLf2i y)) 40125 for { 40126 off := v.AuxInt 40127 sym := v.Aux 40128 _ = v.Args[2] 40129 x := v.Args[0] 40130 ptr := v.Args[1] 40131 v_2 := v.Args[2] 40132 if v_2.Op != OpAMD64MOVSSstore { 40133 break 40134 } 40135 if v_2.AuxInt != off { 40136 break 40137 } 40138 if v_2.Aux != sym { 40139 break 40140 } 40141 _ = v_2.Args[2] 40142 if ptr != v_2.Args[0] { 40143 break 40144 } 40145 y := v_2.Args[1] 40146 v.reset(OpAMD64SUBL) 40147 v.AddArg(x) 40148 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 40149 v0.AddArg(y) 40150 v.AddArg(v0) 40151 return true 40152 } 40153 return false 40154 } 40155 func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { 40156 b := v.Block 40157 _ = b 40158 // match: (SUBQ x (MOVQconst [c])) 40159 // cond: is32Bit(c) 40160 // result: (SUBQconst x [c]) 40161 for { 40162 _ = v.Args[1] 40163 x := v.Args[0] 40164 v_1 := v.Args[1] 40165 if v_1.Op != OpAMD64MOVQconst { 40166 break 40167 } 40168 c := v_1.AuxInt 40169 if !(is32Bit(c)) { 40170 break 40171 } 40172 v.reset(OpAMD64SUBQconst) 40173 v.AuxInt = c 40174 v.AddArg(x) 40175 return true 40176 } 40177 // match: (SUBQ (MOVQconst [c]) x) 40178 // cond: is32Bit(c) 40179 // result: (NEGQ (SUBQconst <v.Type> x [c])) 40180 for { 40181 _ = v.Args[1] 40182 v_0 := v.Args[0] 40183 if v_0.Op != OpAMD64MOVQconst { 40184 break 40185 } 40186 c := v_0.AuxInt 40187 x := v.Args[1] 40188 if !(is32Bit(c)) { 40189 break 40190 } 40191 v.reset(OpAMD64NEGQ) 40192 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type) 40193 v0.AuxInt = c 40194 v0.AddArg(x) 40195 v.AddArg(v0) 40196 return true 40197 } 40198 // match: (SUBQ x x) 40199 // cond: 40200 // result: (MOVQconst [0]) 40201 for { 40202 _ = v.Args[1] 40203 x := v.Args[0] 40204 if x != v.Args[1] { 40205 break 40206 } 40207 v.reset(OpAMD64MOVQconst) 40208 v.AuxInt = 0 40209 return true 40210 } 40211 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) 40212 // cond: canMergeLoad(v, l, x) && clobber(l) 40213 // result: (SUBQmem x [off] {sym} ptr mem) 40214 for { 40215 _ = v.Args[1] 40216 x := v.Args[0] 40217 l := v.Args[1] 40218 if l.Op != OpAMD64MOVQload { 40219 break 40220 } 40221 off := l.AuxInt 40222 sym := l.Aux 40223 _ = l.Args[1] 40224 ptr := l.Args[0] 40225 mem := 
l.Args[1] 40226 if !(canMergeLoad(v, l, x) && clobber(l)) { 40227 break 40228 } 40229 v.reset(OpAMD64SUBQmem) 40230 v.AuxInt = off 40231 v.Aux = sym 40232 v.AddArg(x) 40233 v.AddArg(ptr) 40234 v.AddArg(mem) 40235 return true 40236 } 40237 return false 40238 } 40239 func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { 40240 // match: (SUBQconst [0] x) 40241 // cond: 40242 // result: x 40243 for { 40244 if v.AuxInt != 0 { 40245 break 40246 } 40247 x := v.Args[0] 40248 v.reset(OpCopy) 40249 v.Type = x.Type 40250 v.AddArg(x) 40251 return true 40252 } 40253 // match: (SUBQconst [c] x) 40254 // cond: c != -(1<<31) 40255 // result: (ADDQconst [-c] x) 40256 for { 40257 c := v.AuxInt 40258 x := v.Args[0] 40259 if !(c != -(1 << 31)) { 40260 break 40261 } 40262 v.reset(OpAMD64ADDQconst) 40263 v.AuxInt = -c 40264 v.AddArg(x) 40265 return true 40266 } 40267 // match: (SUBQconst (MOVQconst [d]) [c]) 40268 // cond: 40269 // result: (MOVQconst [d-c]) 40270 for { 40271 c := v.AuxInt 40272 v_0 := v.Args[0] 40273 if v_0.Op != OpAMD64MOVQconst { 40274 break 40275 } 40276 d := v_0.AuxInt 40277 v.reset(OpAMD64MOVQconst) 40278 v.AuxInt = d - c 40279 return true 40280 } 40281 // match: (SUBQconst (SUBQconst x [d]) [c]) 40282 // cond: is32Bit(-c-d) 40283 // result: (ADDQconst [-c-d] x) 40284 for { 40285 c := v.AuxInt 40286 v_0 := v.Args[0] 40287 if v_0.Op != OpAMD64SUBQconst { 40288 break 40289 } 40290 d := v_0.AuxInt 40291 x := v_0.Args[0] 40292 if !(is32Bit(-c - d)) { 40293 break 40294 } 40295 v.reset(OpAMD64ADDQconst) 40296 v.AuxInt = -c - d 40297 v.AddArg(x) 40298 return true 40299 } 40300 return false 40301 } 40302 func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool { 40303 b := v.Block 40304 _ = b 40305 typ := &b.Func.Config.Types 40306 _ = typ 40307 // match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 40308 // cond: 40309 // result: (SUBQ x (MOVQf2i y)) 40310 for { 40311 off := v.AuxInt 40312 sym := v.Aux 40313 _ = v.Args[2] 40314 x := v.Args[0] 40315 ptr := v.Args[1] 40316 v_2 := v.Args[2] 40317 if v_2.Op != OpAMD64MOVSDstore { 40318 break 40319 } 40320 if v_2.AuxInt != off { 40321 break 40322 } 40323 if v_2.Aux != sym { 40324 break 40325 } 40326 _ = v_2.Args[2] 40327 if ptr != v_2.Args[0] { 40328 break 40329 } 40330 y := v_2.Args[1] 40331 v.reset(OpAMD64SUBQ) 40332 v.AddArg(x) 40333 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 40334 v0.AddArg(y) 40335 v.AddArg(v0) 40336 return true 40337 } 40338 return false 40339 } 40340 func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { 40341 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) 40342 // cond: canMergeLoad(v, l, x) && clobber(l) 40343 // result: (SUBSDmem x [off] {sym} ptr mem) 40344 for { 40345 _ = v.Args[1] 40346 x := v.Args[0] 40347 l := v.Args[1] 40348 if l.Op != OpAMD64MOVSDload { 40349 break 40350 } 40351 off := l.AuxInt 40352 sym := l.Aux 40353 _ = l.Args[1] 40354 ptr := l.Args[0] 40355 mem := l.Args[1] 40356 if !(canMergeLoad(v, l, x) && clobber(l)) { 40357 break 40358 } 40359 v.reset(OpAMD64SUBSDmem) 40360 v.AuxInt = off 40361 v.Aux = sym 40362 v.AddArg(x) 40363 v.AddArg(ptr) 40364 v.AddArg(mem) 40365 return true 40366 } 40367 return false 40368 } 40369 func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool { 40370 b := v.Block 40371 _ = b 40372 typ := &b.Func.Config.Types 40373 _ = typ 40374 // match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) 40375 // cond: 40376 // result: (SUBSD x (MOVQi2f y)) 40377 for { 40378 off := v.AuxInt 40379 sym := v.Aux 40380 _ = v.Args[2] 40381 x := 
v.Args[0] 40382 ptr := v.Args[1] 40383 v_2 := v.Args[2] 40384 if v_2.Op != OpAMD64MOVQstore { 40385 break 40386 } 40387 if v_2.AuxInt != off { 40388 break 40389 } 40390 if v_2.Aux != sym { 40391 break 40392 } 40393 _ = v_2.Args[2] 40394 if ptr != v_2.Args[0] { 40395 break 40396 } 40397 y := v_2.Args[1] 40398 v.reset(OpAMD64SUBSD) 40399 v.AddArg(x) 40400 v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) 40401 v0.AddArg(y) 40402 v.AddArg(v0) 40403 return true 40404 } 40405 return false 40406 } 40407 func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { 40408 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) 40409 // cond: canMergeLoad(v, l, x) && clobber(l) 40410 // result: (SUBSSmem x [off] {sym} ptr mem) 40411 for { 40412 _ = v.Args[1] 40413 x := v.Args[0] 40414 l := v.Args[1] 40415 if l.Op != OpAMD64MOVSSload { 40416 break 40417 } 40418 off := l.AuxInt 40419 sym := l.Aux 40420 _ = l.Args[1] 40421 ptr := l.Args[0] 40422 mem := l.Args[1] 40423 if !(canMergeLoad(v, l, x) && clobber(l)) { 40424 break 40425 } 40426 v.reset(OpAMD64SUBSSmem) 40427 v.AuxInt = off 40428 v.Aux = sym 40429 v.AddArg(x) 40430 v.AddArg(ptr) 40431 v.AddArg(mem) 40432 return true 40433 } 40434 return false 40435 } 40436 func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool { 40437 b := v.Block 40438 _ = b 40439 typ := &b.Func.Config.Types 40440 _ = typ 40441 // match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) 40442 // cond: 40443 // result: (SUBSS x (MOVLi2f y)) 40444 for { 40445 off := v.AuxInt 40446 sym := v.Aux 40447 _ = v.Args[2] 40448 x := v.Args[0] 40449 ptr := v.Args[1] 40450 v_2 := v.Args[2] 40451 if v_2.Op != OpAMD64MOVLstore { 40452 break 40453 } 40454 if v_2.AuxInt != off { 40455 break 40456 } 40457 if v_2.Aux != sym { 40458 break 40459 } 40460 _ = v_2.Args[2] 40461 if ptr != v_2.Args[0] { 40462 break 40463 } 40464 y := v_2.Args[1] 40465 v.reset(OpAMD64SUBSS) 40466 v.AddArg(x) 40467 v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) 40468 v0.AddArg(y) 40469 v.AddArg(v0) 40470 return true 40471 } 40472 return false 40473 } 40474 func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { 40475 // match: (TESTB (MOVLconst [c]) x) 40476 // cond: 40477 // result: (TESTBconst [c] x) 40478 for { 40479 _ = v.Args[1] 40480 v_0 := v.Args[0] 40481 if v_0.Op != OpAMD64MOVLconst { 40482 break 40483 } 40484 c := v_0.AuxInt 40485 x := v.Args[1] 40486 v.reset(OpAMD64TESTBconst) 40487 v.AuxInt = c 40488 v.AddArg(x) 40489 return true 40490 } 40491 // match: (TESTB x (MOVLconst [c])) 40492 // cond: 40493 // result: (TESTBconst [c] x) 40494 for { 40495 _ = v.Args[1] 40496 x := v.Args[0] 40497 v_1 := v.Args[1] 40498 if v_1.Op != OpAMD64MOVLconst { 40499 break 40500 } 40501 c := v_1.AuxInt 40502 v.reset(OpAMD64TESTBconst) 40503 v.AuxInt = c 40504 v.AddArg(x) 40505 return true 40506 } 40507 return false 40508 } 40509 func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool { 40510 // match: (TESTL (MOVLconst [c]) x) 40511 // cond: 40512 // result: (TESTLconst [c] x) 40513 for { 40514 _ = v.Args[1] 40515 v_0 := v.Args[0] 40516 if v_0.Op != OpAMD64MOVLconst { 40517 break 40518 } 40519 c := v_0.AuxInt 40520 x := v.Args[1] 40521 v.reset(OpAMD64TESTLconst) 40522 v.AuxInt = c 40523 v.AddArg(x) 40524 return true 40525 } 40526 // match: (TESTL x (MOVLconst [c])) 40527 // cond: 40528 // result: (TESTLconst [c] x) 40529 for { 40530 _ = v.Args[1] 40531 x := v.Args[0] 40532 v_1 := v.Args[1] 40533 if v_1.Op != OpAMD64MOVLconst { 40534 break 40535 } 40536 c := v_1.AuxInt 40537 v.reset(OpAMD64TESTLconst) 40538 v.AuxInt = c 
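// Editorial sketch: TESTB/TESTL/TESTW/TESTQ are commutative, so the
// generator emits one rule per operand order, both producing the same
// TESTxconst. Combined with the compare-of-AND rules elsewhere in this
// file, a mask check like the following can compile to a single
// TESTL $7, reg (function name is illustrative):
//
//	func hasLowBits(x int32) bool {
//		return x&7 != 0 // lowered via TESTLconst [7]
//	}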
40539 v.AddArg(x) 40540 return true 40541 } 40542 return false 40543 } 40544 func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool { 40545 // match: (TESTQ (MOVQconst [c]) x) 40546 // cond: is32Bit(c) 40547 // result: (TESTQconst [c] x) 40548 for { 40549 _ = v.Args[1] 40550 v_0 := v.Args[0] 40551 if v_0.Op != OpAMD64MOVQconst { 40552 break 40553 } 40554 c := v_0.AuxInt 40555 x := v.Args[1] 40556 if !(is32Bit(c)) { 40557 break 40558 } 40559 v.reset(OpAMD64TESTQconst) 40560 v.AuxInt = c 40561 v.AddArg(x) 40562 return true 40563 } 40564 // match: (TESTQ x (MOVQconst [c])) 40565 // cond: is32Bit(c) 40566 // result: (TESTQconst [c] x) 40567 for { 40568 _ = v.Args[1] 40569 x := v.Args[0] 40570 v_1 := v.Args[1] 40571 if v_1.Op != OpAMD64MOVQconst { 40572 break 40573 } 40574 c := v_1.AuxInt 40575 if !(is32Bit(c)) { 40576 break 40577 } 40578 v.reset(OpAMD64TESTQconst) 40579 v.AuxInt = c 40580 v.AddArg(x) 40581 return true 40582 } 40583 return false 40584 } 40585 func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool { 40586 // match: (TESTW (MOVLconst [c]) x) 40587 // cond: 40588 // result: (TESTWconst [c] x) 40589 for { 40590 _ = v.Args[1] 40591 v_0 := v.Args[0] 40592 if v_0.Op != OpAMD64MOVLconst { 40593 break 40594 } 40595 c := v_0.AuxInt 40596 x := v.Args[1] 40597 v.reset(OpAMD64TESTWconst) 40598 v.AuxInt = c 40599 v.AddArg(x) 40600 return true 40601 } 40602 // match: (TESTW x (MOVLconst [c])) 40603 // cond: 40604 // result: (TESTWconst [c] x) 40605 for { 40606 _ = v.Args[1] 40607 x := v.Args[0] 40608 v_1 := v.Args[1] 40609 if v_1.Op != OpAMD64MOVLconst { 40610 break 40611 } 40612 c := v_1.AuxInt 40613 v.reset(OpAMD64TESTWconst) 40614 v.AuxInt = c 40615 v.AddArg(x) 40616 return true 40617 } 40618 return false 40619 } 40620 func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool { 40621 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 40622 // cond: is32Bit(off1+off2) 40623 // result: (XADDLlock [off1+off2] {sym} val ptr mem) 40624 for { 40625 off1 := v.AuxInt 40626 sym := v.Aux 40627 _ = v.Args[2] 40628 val := v.Args[0] 40629 v_1 := v.Args[1] 40630 if v_1.Op != OpAMD64ADDQconst { 40631 break 40632 } 40633 off2 := v_1.AuxInt 40634 ptr := v_1.Args[0] 40635 mem := v.Args[2] 40636 if !(is32Bit(off1 + off2)) { 40637 break 40638 } 40639 v.reset(OpAMD64XADDLlock) 40640 v.AuxInt = off1 + off2 40641 v.Aux = sym 40642 v.AddArg(val) 40643 v.AddArg(ptr) 40644 v.AddArg(mem) 40645 return true 40646 } 40647 return false 40648 } 40649 func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool { 40650 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) 40651 // cond: is32Bit(off1+off2) 40652 // result: (XADDQlock [off1+off2] {sym} val ptr mem) 40653 for { 40654 off1 := v.AuxInt 40655 sym := v.Aux 40656 _ = v.Args[2] 40657 val := v.Args[0] 40658 v_1 := v.Args[1] 40659 if v_1.Op != OpAMD64ADDQconst { 40660 break 40661 } 40662 off2 := v_1.AuxInt 40663 ptr := v_1.Args[0] 40664 mem := v.Args[2] 40665 if !(is32Bit(off1 + off2)) { 40666 break 40667 } 40668 v.reset(OpAMD64XADDQlock) 40669 v.AuxInt = off1 + off2 40670 v.Aux = sym 40671 v.AddArg(val) 40672 v.AddArg(ptr) 40673 v.AddArg(mem) 40674 return true 40675 } 40676 return false 40677 } 40678 func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool { 40679 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) 40680 // cond: is32Bit(off1+off2) 40681 // result: (XCHGL [off1+off2] {sym} val ptr mem) 40682 for { 40683 off1 := v.AuxInt 40684 sym := v.Aux 40685 _ = v.Args[2] 40686 val := v.Args[0] 40687 v_1 := v.Args[1] 40688 if v_1.Op != 
OpAMD64ADDQconst { 40689 break 40690 } 40691 off2 := v_1.AuxInt 40692 ptr := v_1.Args[0] 40693 mem := v.Args[2] 40694 if !(is32Bit(off1 + off2)) { 40695 break 40696 } 40697 v.reset(OpAMD64XCHGL) 40698 v.AuxInt = off1 + off2 40699 v.Aux = sym 40700 v.AddArg(val) 40701 v.AddArg(ptr) 40702 v.AddArg(mem) 40703 return true 40704 } 40705 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 40706 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 40707 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 40708 for { 40709 off1 := v.AuxInt 40710 sym1 := v.Aux 40711 _ = v.Args[2] 40712 val := v.Args[0] 40713 v_1 := v.Args[1] 40714 if v_1.Op != OpAMD64LEAQ { 40715 break 40716 } 40717 off2 := v_1.AuxInt 40718 sym2 := v_1.Aux 40719 ptr := v_1.Args[0] 40720 mem := v.Args[2] 40721 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 40722 break 40723 } 40724 v.reset(OpAMD64XCHGL) 40725 v.AuxInt = off1 + off2 40726 v.Aux = mergeSym(sym1, sym2) 40727 v.AddArg(val) 40728 v.AddArg(ptr) 40729 v.AddArg(mem) 40730 return true 40731 } 40732 return false 40733 } 40734 func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool { 40735 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) 40736 // cond: is32Bit(off1+off2) 40737 // result: (XCHGQ [off1+off2] {sym} val ptr mem) 40738 for { 40739 off1 := v.AuxInt 40740 sym := v.Aux 40741 _ = v.Args[2] 40742 val := v.Args[0] 40743 v_1 := v.Args[1] 40744 if v_1.Op != OpAMD64ADDQconst { 40745 break 40746 } 40747 off2 := v_1.AuxInt 40748 ptr := v_1.Args[0] 40749 mem := v.Args[2] 40750 if !(is32Bit(off1 + off2)) { 40751 break 40752 } 40753 v.reset(OpAMD64XCHGQ) 40754 v.AuxInt = off1 + off2 40755 v.Aux = sym 40756 v.AddArg(val) 40757 v.AddArg(ptr) 40758 v.AddArg(mem) 40759 return true 40760 } 40761 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) 40762 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB 40763 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) 40764 for { 40765 off1 := v.AuxInt 40766 sym1 := v.Aux 40767 _ = v.Args[2] 40768 val := v.Args[0] 40769 v_1 := v.Args[1] 40770 if v_1.Op != OpAMD64LEAQ { 40771 break 40772 } 40773 off2 := v_1.AuxInt 40774 sym2 := v_1.Aux 40775 ptr := v_1.Args[0] 40776 mem := v.Args[2] 40777 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { 40778 break 40779 } 40780 v.reset(OpAMD64XCHGQ) 40781 v.AuxInt = off1 + off2 40782 v.Aux = mergeSym(sym1, sym2) 40783 v.AddArg(val) 40784 v.AddArg(ptr) 40785 v.AddArg(mem) 40786 return true 40787 } 40788 return false 40789 } 40790 func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool { 40791 // match: (XORL x (MOVLconst [c])) 40792 // cond: 40793 // result: (XORLconst [c] x) 40794 for { 40795 _ = v.Args[1] 40796 x := v.Args[0] 40797 v_1 := v.Args[1] 40798 if v_1.Op != OpAMD64MOVLconst { 40799 break 40800 } 40801 c := v_1.AuxInt 40802 v.reset(OpAMD64XORLconst) 40803 v.AuxInt = c 40804 v.AddArg(x) 40805 return true 40806 } 40807 // match: (XORL (MOVLconst [c]) x) 40808 // cond: 40809 // result: (XORLconst [c] x) 40810 for { 40811 _ = v.Args[1] 40812 v_0 := v.Args[0] 40813 if v_0.Op != OpAMD64MOVLconst { 40814 break 40815 } 40816 c := v_0.AuxInt 40817 x := v.Args[1] 40818 v.reset(OpAMD64XORLconst) 40819 v.AuxInt = c 40820 v.AddArg(x) 40821 return true 40822 } 40823 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) 40824 // cond: d==32-c 40825 // result: (ROLLconst x [c]) 40826 for { 40827 _ = v.Args[1] 40828 v_0 := v.Args[0] 40829 if v_0.Op != OpAMD64SHLLconst { 
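// Editorial sketch: this rule and its mirror below recognize the classic
// shift-pair encoding of a rotate: for a 32-bit value, x<<c ^ x>>(32-c)
// (XOR here, since the shifted halves never overlap) is exactly a rotate
// left by c, so the pair collapses into ROLLconst. For example (the
// constant 5 is illustrative):
//
//	func rol5(x uint32) uint32 {
//		return x<<5 ^ x>>27 // d == 32-c, rewritten to ROLLconst [5]
//	}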
40830 break 40831 } 40832 c := v_0.AuxInt 40833 x := v_0.Args[0] 40834 v_1 := v.Args[1] 40835 if v_1.Op != OpAMD64SHRLconst { 40836 break 40837 } 40838 d := v_1.AuxInt 40839 if x != v_1.Args[0] { 40840 break 40841 } 40842 if !(d == 32-c) { 40843 break 40844 } 40845 v.reset(OpAMD64ROLLconst) 40846 v.AuxInt = c 40847 v.AddArg(x) 40848 return true 40849 } 40850 // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) 40851 // cond: d==32-c 40852 // result: (ROLLconst x [c]) 40853 for { 40854 _ = v.Args[1] 40855 v_0 := v.Args[0] 40856 if v_0.Op != OpAMD64SHRLconst { 40857 break 40858 } 40859 d := v_0.AuxInt 40860 x := v_0.Args[0] 40861 v_1 := v.Args[1] 40862 if v_1.Op != OpAMD64SHLLconst { 40863 break 40864 } 40865 c := v_1.AuxInt 40866 if x != v_1.Args[0] { 40867 break 40868 } 40869 if !(d == 32-c) { 40870 break 40871 } 40872 v.reset(OpAMD64ROLLconst) 40873 v.AuxInt = c 40874 v.AddArg(x) 40875 return true 40876 } 40877 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) 40878 // cond: d==16-c && c < 16 && t.Size() == 2 40879 // result: (ROLWconst x [c]) 40880 for { 40881 t := v.Type 40882 _ = v.Args[1] 40883 v_0 := v.Args[0] 40884 if v_0.Op != OpAMD64SHLLconst { 40885 break 40886 } 40887 c := v_0.AuxInt 40888 x := v_0.Args[0] 40889 v_1 := v.Args[1] 40890 if v_1.Op != OpAMD64SHRWconst { 40891 break 40892 } 40893 d := v_1.AuxInt 40894 if x != v_1.Args[0] { 40895 break 40896 } 40897 if !(d == 16-c && c < 16 && t.Size() == 2) { 40898 break 40899 } 40900 v.reset(OpAMD64ROLWconst) 40901 v.AuxInt = c 40902 v.AddArg(x) 40903 return true 40904 } 40905 // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c])) 40906 // cond: d==16-c && c < 16 && t.Size() == 2 40907 // result: (ROLWconst x [c]) 40908 for { 40909 t := v.Type 40910 _ = v.Args[1] 40911 v_0 := v.Args[0] 40912 if v_0.Op != OpAMD64SHRWconst { 40913 break 40914 } 40915 d := v_0.AuxInt 40916 x := v_0.Args[0] 40917 v_1 := v.Args[1] 40918 if v_1.Op != OpAMD64SHLLconst { 40919 break 40920 } 40921 c := v_1.AuxInt 40922 if x != v_1.Args[0] { 40923 break 40924 } 40925 if !(d == 16-c && c < 16 && t.Size() == 2) { 40926 break 40927 } 40928 v.reset(OpAMD64ROLWconst) 40929 v.AuxInt = c 40930 v.AddArg(x) 40931 return true 40932 } 40933 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) 40934 // cond: d==8-c && c < 8 && t.Size() == 1 40935 // result: (ROLBconst x [c]) 40936 for { 40937 t := v.Type 40938 _ = v.Args[1] 40939 v_0 := v.Args[0] 40940 if v_0.Op != OpAMD64SHLLconst { 40941 break 40942 } 40943 c := v_0.AuxInt 40944 x := v_0.Args[0] 40945 v_1 := v.Args[1] 40946 if v_1.Op != OpAMD64SHRBconst { 40947 break 40948 } 40949 d := v_1.AuxInt 40950 if x != v_1.Args[0] { 40951 break 40952 } 40953 if !(d == 8-c && c < 8 && t.Size() == 1) { 40954 break 40955 } 40956 v.reset(OpAMD64ROLBconst) 40957 v.AuxInt = c 40958 v.AddArg(x) 40959 return true 40960 } 40961 // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c])) 40962 // cond: d==8-c && c < 8 && t.Size() == 1 40963 // result: (ROLBconst x [c]) 40964 for { 40965 t := v.Type 40966 _ = v.Args[1] 40967 v_0 := v.Args[0] 40968 if v_0.Op != OpAMD64SHRBconst { 40969 break 40970 } 40971 d := v_0.AuxInt 40972 x := v_0.Args[0] 40973 v_1 := v.Args[1] 40974 if v_1.Op != OpAMD64SHLLconst { 40975 break 40976 } 40977 c := v_1.AuxInt 40978 if x != v_1.Args[0] { 40979 break 40980 } 40981 if !(d == 8-c && c < 8 && t.Size() == 1) { 40982 break 40983 } 40984 v.reset(OpAMD64ROLBconst) 40985 v.AuxInt = c 40986 v.AddArg(x) 40987 return true 40988 } 40989 // match: (XORL x x) 40990 // cond: 40991 // result: (MOVLconst [0]) 
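// Editorial note: x XOR x is always zero, so (XORL x x) constant-folds to
// MOVLconst [0], which the backend is then free to materialize with the
// classic xor-reg,reg zeroing idiom. Equivalent source shape:
//
//	func zero(x int32) int32 {
//		return x ^ x // folds to the constant 0
//	}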
40992 for { 40993 _ = v.Args[1] 40994 x := v.Args[0] 40995 if x != v.Args[1] { 40996 break 40997 } 40998 v.reset(OpAMD64MOVLconst) 40999 v.AuxInt = 0 41000 return true 41001 } 41002 // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) 41003 // cond: canMergeLoad(v, l, x) && clobber(l) 41004 // result: (XORLmem x [off] {sym} ptr mem) 41005 for { 41006 _ = v.Args[1] 41007 x := v.Args[0] 41008 l := v.Args[1] 41009 if l.Op != OpAMD64MOVLload { 41010 break 41011 } 41012 off := l.AuxInt 41013 sym := l.Aux 41014 _ = l.Args[1] 41015 ptr := l.Args[0] 41016 mem := l.Args[1] 41017 if !(canMergeLoad(v, l, x) && clobber(l)) { 41018 break 41019 } 41020 v.reset(OpAMD64XORLmem) 41021 v.AuxInt = off 41022 v.Aux = sym 41023 v.AddArg(x) 41024 v.AddArg(ptr) 41025 v.AddArg(mem) 41026 return true 41027 } 41028 return false 41029 } 41030 func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { 41031 // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) 41032 // cond: canMergeLoad(v, l, x) && clobber(l) 41033 // result: (XORLmem x [off] {sym} ptr mem) 41034 for { 41035 _ = v.Args[1] 41036 l := v.Args[0] 41037 if l.Op != OpAMD64MOVLload { 41038 break 41039 } 41040 off := l.AuxInt 41041 sym := l.Aux 41042 _ = l.Args[1] 41043 ptr := l.Args[0] 41044 mem := l.Args[1] 41045 x := v.Args[1] 41046 if !(canMergeLoad(v, l, x) && clobber(l)) { 41047 break 41048 } 41049 v.reset(OpAMD64XORLmem) 41050 v.AuxInt = off 41051 v.Aux = sym 41052 v.AddArg(x) 41053 v.AddArg(ptr) 41054 v.AddArg(mem) 41055 return true 41056 } 41057 return false 41058 } 41059 func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool { 41060 // match: (XORLconst [1] (SETNE x)) 41061 // cond: 41062 // result: (SETEQ x) 41063 for { 41064 if v.AuxInt != 1 { 41065 break 41066 } 41067 v_0 := v.Args[0] 41068 if v_0.Op != OpAMD64SETNE { 41069 break 41070 } 41071 x := v_0.Args[0] 41072 v.reset(OpAMD64SETEQ) 41073 v.AddArg(x) 41074 return true 41075 } 41076 // match: (XORLconst [1] (SETEQ x)) 41077 // cond: 41078 // result: (SETNE x) 41079 for { 41080 if v.AuxInt != 1 { 41081 break 41082 } 41083 v_0 := v.Args[0] 41084 if v_0.Op != OpAMD64SETEQ { 41085 break 41086 } 41087 x := v_0.Args[0] 41088 v.reset(OpAMD64SETNE) 41089 v.AddArg(x) 41090 return true 41091 } 41092 // match: (XORLconst [1] (SETL x)) 41093 // cond: 41094 // result: (SETGE x) 41095 for { 41096 if v.AuxInt != 1 { 41097 break 41098 } 41099 v_0 := v.Args[0] 41100 if v_0.Op != OpAMD64SETL { 41101 break 41102 } 41103 x := v_0.Args[0] 41104 v.reset(OpAMD64SETGE) 41105 v.AddArg(x) 41106 return true 41107 } 41108 // match: (XORLconst [1] (SETGE x)) 41109 // cond: 41110 // result: (SETL x) 41111 for { 41112 if v.AuxInt != 1 { 41113 break 41114 } 41115 v_0 := v.Args[0] 41116 if v_0.Op != OpAMD64SETGE { 41117 break 41118 } 41119 x := v_0.Args[0] 41120 v.reset(OpAMD64SETL) 41121 v.AddArg(x) 41122 return true 41123 } 41124 // match: (XORLconst [1] (SETLE x)) 41125 // cond: 41126 // result: (SETG x) 41127 for { 41128 if v.AuxInt != 1 { 41129 break 41130 } 41131 v_0 := v.Args[0] 41132 if v_0.Op != OpAMD64SETLE { 41133 break 41134 } 41135 x := v_0.Args[0] 41136 v.reset(OpAMD64SETG) 41137 v.AddArg(x) 41138 return true 41139 } 41140 // match: (XORLconst [1] (SETG x)) 41141 // cond: 41142 // result: (SETLE x) 41143 for { 41144 if v.AuxInt != 1 { 41145 break 41146 } 41147 v_0 := v.Args[0] 41148 if v_0.Op != OpAMD64SETG { 41149 break 41150 } 41151 x := v_0.Args[0] 41152 v.reset(OpAMD64SETLE) 41153 v.AddArg(x) 41154 return true 41155 } 41156 // match: (XORLconst [1] (SETB x)) 41157 // cond: 41158 // result: (SETAE x) 41159 for { 
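// Editorial sketch: SETcc results are materialized as the bytes 0 or 1, so
// XORing with 1 is boolean negation, and these XORLconst [1] rules push the
// negation into the condition code itself (SETB <-> SETAE, SETA <-> SETBE,
// and so on). For example (function name is illustrative):
//
//	func notBelow(a, b uint64) bool {
//		return !(a < b) // SETB + XORLconst [1] becomes a single SETAE
//	}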
41160 if v.AuxInt != 1 { 41161 break 41162 } 41163 v_0 := v.Args[0] 41164 if v_0.Op != OpAMD64SETB { 41165 break 41166 } 41167 x := v_0.Args[0] 41168 v.reset(OpAMD64SETAE) 41169 v.AddArg(x) 41170 return true 41171 } 41172 // match: (XORLconst [1] (SETAE x)) 41173 // cond: 41174 // result: (SETB x) 41175 for { 41176 if v.AuxInt != 1 { 41177 break 41178 } 41179 v_0 := v.Args[0] 41180 if v_0.Op != OpAMD64SETAE { 41181 break 41182 } 41183 x := v_0.Args[0] 41184 v.reset(OpAMD64SETB) 41185 v.AddArg(x) 41186 return true 41187 } 41188 // match: (XORLconst [1] (SETBE x)) 41189 // cond: 41190 // result: (SETA x) 41191 for { 41192 if v.AuxInt != 1 { 41193 break 41194 } 41195 v_0 := v.Args[0] 41196 if v_0.Op != OpAMD64SETBE { 41197 break 41198 } 41199 x := v_0.Args[0] 41200 v.reset(OpAMD64SETA) 41201 v.AddArg(x) 41202 return true 41203 } 41204 // match: (XORLconst [1] (SETA x)) 41205 // cond: 41206 // result: (SETBE x) 41207 for { 41208 if v.AuxInt != 1 { 41209 break 41210 } 41211 v_0 := v.Args[0] 41212 if v_0.Op != OpAMD64SETA { 41213 break 41214 } 41215 x := v_0.Args[0] 41216 v.reset(OpAMD64SETBE) 41217 v.AddArg(x) 41218 return true 41219 } 41220 return false 41221 } 41222 func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { 41223 // match: (XORLconst [c] (XORLconst [d] x)) 41224 // cond: 41225 // result: (XORLconst [c ^ d] x) 41226 for { 41227 c := v.AuxInt 41228 v_0 := v.Args[0] 41229 if v_0.Op != OpAMD64XORLconst { 41230 break 41231 } 41232 d := v_0.AuxInt 41233 x := v_0.Args[0] 41234 v.reset(OpAMD64XORLconst) 41235 v.AuxInt = c ^ d 41236 v.AddArg(x) 41237 return true 41238 } 41239 // match: (XORLconst [c] x) 41240 // cond: int32(c)==0 41241 // result: x 41242 for { 41243 c := v.AuxInt 41244 x := v.Args[0] 41245 if !(int32(c) == 0) { 41246 break 41247 } 41248 v.reset(OpCopy) 41249 v.Type = x.Type 41250 v.AddArg(x) 41251 return true 41252 } 41253 // match: (XORLconst [c] (MOVLconst [d])) 41254 // cond: 41255 // result: (MOVLconst [c^d]) 41256 for { 41257 c := v.AuxInt 41258 v_0 := v.Args[0] 41259 if v_0.Op != OpAMD64MOVLconst { 41260 break 41261 } 41262 d := v_0.AuxInt 41263 v.reset(OpAMD64MOVLconst) 41264 v.AuxInt = c ^ d 41265 return true 41266 } 41267 return false 41268 } 41269 func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool { 41270 b := v.Block 41271 _ = b 41272 typ := &b.Func.Config.Types 41273 _ = typ 41274 // match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) 41275 // cond: 41276 // result: (XORL x (MOVLf2i y)) 41277 for { 41278 off := v.AuxInt 41279 sym := v.Aux 41280 _ = v.Args[2] 41281 x := v.Args[0] 41282 ptr := v.Args[1] 41283 v_2 := v.Args[2] 41284 if v_2.Op != OpAMD64MOVSSstore { 41285 break 41286 } 41287 if v_2.AuxInt != off { 41288 break 41289 } 41290 if v_2.Aux != sym { 41291 break 41292 } 41293 _ = v_2.Args[2] 41294 if ptr != v_2.Args[0] { 41295 break 41296 } 41297 y := v_2.Args[1] 41298 v.reset(OpAMD64XORL) 41299 v.AddArg(x) 41300 v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) 41301 v0.AddArg(y) 41302 v.AddArg(v0) 41303 return true 41304 } 41305 return false 41306 } 41307 func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { 41308 // match: (XORQ x (MOVQconst [c])) 41309 // cond: is32Bit(c) 41310 // result: (XORQconst [c] x) 41311 for { 41312 _ = v.Args[1] 41313 x := v.Args[0] 41314 v_1 := v.Args[1] 41315 if v_1.Op != OpAMD64MOVQconst { 41316 break 41317 } 41318 c := v_1.AuxInt 41319 if !(is32Bit(c)) { 41320 break 41321 } 41322 v.reset(OpAMD64XORQconst) 41323 v.AuxInt = c 41324 v.AddArg(x) 41325 return true 41326 } 41327 // match: (XORQ 
(MOVQconst [c]) x) 41328 // cond: is32Bit(c) 41329 // result: (XORQconst [c] x) 41330 for { 41331 _ = v.Args[1] 41332 v_0 := v.Args[0] 41333 if v_0.Op != OpAMD64MOVQconst { 41334 break 41335 } 41336 c := v_0.AuxInt 41337 x := v.Args[1] 41338 if !(is32Bit(c)) { 41339 break 41340 } 41341 v.reset(OpAMD64XORQconst) 41342 v.AuxInt = c 41343 v.AddArg(x) 41344 return true 41345 } 41346 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) 41347 // cond: d==64-c 41348 // result: (ROLQconst x [c]) 41349 for { 41350 _ = v.Args[1] 41351 v_0 := v.Args[0] 41352 if v_0.Op != OpAMD64SHLQconst { 41353 break 41354 } 41355 c := v_0.AuxInt 41356 x := v_0.Args[0] 41357 v_1 := v.Args[1] 41358 if v_1.Op != OpAMD64SHRQconst { 41359 break 41360 } 41361 d := v_1.AuxInt 41362 if x != v_1.Args[0] { 41363 break 41364 } 41365 if !(d == 64-c) { 41366 break 41367 } 41368 v.reset(OpAMD64ROLQconst) 41369 v.AuxInt = c 41370 v.AddArg(x) 41371 return true 41372 } 41373 // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c])) 41374 // cond: d==64-c 41375 // result: (ROLQconst x [c]) 41376 for { 41377 _ = v.Args[1] 41378 v_0 := v.Args[0] 41379 if v_0.Op != OpAMD64SHRQconst { 41380 break 41381 } 41382 d := v_0.AuxInt 41383 x := v_0.Args[0] 41384 v_1 := v.Args[1] 41385 if v_1.Op != OpAMD64SHLQconst { 41386 break 41387 } 41388 c := v_1.AuxInt 41389 if x != v_1.Args[0] { 41390 break 41391 } 41392 if !(d == 64-c) { 41393 break 41394 } 41395 v.reset(OpAMD64ROLQconst) 41396 v.AuxInt = c 41397 v.AddArg(x) 41398 return true 41399 } 41400 // match: (XORQ x x) 41401 // cond: 41402 // result: (MOVQconst [0]) 41403 for { 41404 _ = v.Args[1] 41405 x := v.Args[0] 41406 if x != v.Args[1] { 41407 break 41408 } 41409 v.reset(OpAMD64MOVQconst) 41410 v.AuxInt = 0 41411 return true 41412 } 41413 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) 41414 // cond: canMergeLoad(v, l, x) && clobber(l) 41415 // result: (XORQmem x [off] {sym} ptr mem) 41416 for { 41417 _ = v.Args[1] 41418 x := v.Args[0] 41419 l := v.Args[1] 41420 if l.Op != OpAMD64MOVQload { 41421 break 41422 } 41423 off := l.AuxInt 41424 sym := l.Aux 41425 _ = l.Args[1] 41426 ptr := l.Args[0] 41427 mem := l.Args[1] 41428 if !(canMergeLoad(v, l, x) && clobber(l)) { 41429 break 41430 } 41431 v.reset(OpAMD64XORQmem) 41432 v.AuxInt = off 41433 v.Aux = sym 41434 v.AddArg(x) 41435 v.AddArg(ptr) 41436 v.AddArg(mem) 41437 return true 41438 } 41439 // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x) 41440 // cond: canMergeLoad(v, l, x) && clobber(l) 41441 // result: (XORQmem x [off] {sym} ptr mem) 41442 for { 41443 _ = v.Args[1] 41444 l := v.Args[0] 41445 if l.Op != OpAMD64MOVQload { 41446 break 41447 } 41448 off := l.AuxInt 41449 sym := l.Aux 41450 _ = l.Args[1] 41451 ptr := l.Args[0] 41452 mem := l.Args[1] 41453 x := v.Args[1] 41454 if !(canMergeLoad(v, l, x) && clobber(l)) { 41455 break 41456 } 41457 v.reset(OpAMD64XORQmem) 41458 v.AuxInt = off 41459 v.Aux = sym 41460 v.AddArg(x) 41461 v.AddArg(ptr) 41462 v.AddArg(mem) 41463 return true 41464 } 41465 return false 41466 } 41467 func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { 41468 // match: (XORQconst [c] (XORQconst [d] x)) 41469 // cond: 41470 // result: (XORQconst [c ^ d] x) 41471 for { 41472 c := v.AuxInt 41473 v_0 := v.Args[0] 41474 if v_0.Op != OpAMD64XORQconst { 41475 break 41476 } 41477 d := v_0.AuxInt 41478 x := v_0.Args[0] 41479 v.reset(OpAMD64XORQconst) 41480 v.AuxInt = c ^ d 41481 v.AddArg(x) 41482 return true 41483 } 41484 // match: (XORQconst [0] x) 41485 // cond: 41486 // result: x 41487 for { 41488 if v.AuxInt != 0 { 41489 
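// Editorial note: XOR with zero is the identity, so this rule replaces the
// value with an OpCopy of its argument, which later passes splice out
// entirely; the explicit v.Type = x.Type keeps the copy's type intact.
// Equivalent identity:
//
//	var x uint64
//	_ = x ^ 0 // == x, no instruction generated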
break 41490 } 41491 x := v.Args[0] 41492 v.reset(OpCopy) 41493 v.Type = x.Type 41494 v.AddArg(x) 41495 return true 41496 } 41497 // match: (XORQconst [c] (MOVQconst [d])) 41498 // cond: 41499 // result: (MOVQconst [c^d]) 41500 for { 41501 c := v.AuxInt 41502 v_0 := v.Args[0] 41503 if v_0.Op != OpAMD64MOVQconst { 41504 break 41505 } 41506 d := v_0.AuxInt 41507 v.reset(OpAMD64MOVQconst) 41508 v.AuxInt = c ^ d 41509 return true 41510 } 41511 return false 41512 } 41513 func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool { 41514 b := v.Block 41515 _ = b 41516 typ := &b.Func.Config.Types 41517 _ = typ 41518 // match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 41519 // cond: 41520 // result: (XORQ x (MOVQf2i y)) 41521 for { 41522 off := v.AuxInt 41523 sym := v.Aux 41524 _ = v.Args[2] 41525 x := v.Args[0] 41526 ptr := v.Args[1] 41527 v_2 := v.Args[2] 41528 if v_2.Op != OpAMD64MOVSDstore { 41529 break 41530 } 41531 if v_2.AuxInt != off { 41532 break 41533 } 41534 if v_2.Aux != sym { 41535 break 41536 } 41537 _ = v_2.Args[2] 41538 if ptr != v_2.Args[0] { 41539 break 41540 } 41541 y := v_2.Args[1] 41542 v.reset(OpAMD64XORQ) 41543 v.AddArg(x) 41544 v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) 41545 v0.AddArg(y) 41546 v.AddArg(v0) 41547 return true 41548 } 41549 return false 41550 } 41551 func rewriteValueAMD64_OpAdd16_0(v *Value) bool { 41552 // match: (Add16 x y) 41553 // cond: 41554 // result: (ADDL x y) 41555 for { 41556 _ = v.Args[1] 41557 x := v.Args[0] 41558 y := v.Args[1] 41559 v.reset(OpAMD64ADDL) 41560 v.AddArg(x) 41561 v.AddArg(y) 41562 return true 41563 } 41564 } 41565 func rewriteValueAMD64_OpAdd32_0(v *Value) bool { 41566 // match: (Add32 x y) 41567 // cond: 41568 // result: (ADDL x y) 41569 for { 41570 _ = v.Args[1] 41571 x := v.Args[0] 41572 y := v.Args[1] 41573 v.reset(OpAMD64ADDL) 41574 v.AddArg(x) 41575 v.AddArg(y) 41576 return true 41577 } 41578 } 41579 func rewriteValueAMD64_OpAdd32F_0(v *Value) bool { 41580 // match: (Add32F x y) 41581 // cond: 41582 // result: (ADDSS x y) 41583 for { 41584 _ = v.Args[1] 41585 x := v.Args[0] 41586 y := v.Args[1] 41587 v.reset(OpAMD64ADDSS) 41588 v.AddArg(x) 41589 v.AddArg(y) 41590 return true 41591 } 41592 } 41593 func rewriteValueAMD64_OpAdd64_0(v *Value) bool { 41594 // match: (Add64 x y) 41595 // cond: 41596 // result: (ADDQ x y) 41597 for { 41598 _ = v.Args[1] 41599 x := v.Args[0] 41600 y := v.Args[1] 41601 v.reset(OpAMD64ADDQ) 41602 v.AddArg(x) 41603 v.AddArg(y) 41604 return true 41605 } 41606 } 41607 func rewriteValueAMD64_OpAdd64F_0(v *Value) bool { 41608 // match: (Add64F x y) 41609 // cond: 41610 // result: (ADDSD x y) 41611 for { 41612 _ = v.Args[1] 41613 x := v.Args[0] 41614 y := v.Args[1] 41615 v.reset(OpAMD64ADDSD) 41616 v.AddArg(x) 41617 v.AddArg(y) 41618 return true 41619 } 41620 } 41621 func rewriteValueAMD64_OpAdd8_0(v *Value) bool { 41622 // match: (Add8 x y) 41623 // cond: 41624 // result: (ADDL x y) 41625 for { 41626 _ = v.Args[1] 41627 x := v.Args[0] 41628 y := v.Args[1] 41629 v.reset(OpAMD64ADDL) 41630 v.AddArg(x) 41631 v.AddArg(y) 41632 return true 41633 } 41634 } 41635 func rewriteValueAMD64_OpAddPtr_0(v *Value) bool { 41636 b := v.Block 41637 _ = b 41638 config := b.Func.Config 41639 _ = config 41640 // match: (AddPtr x y) 41641 // cond: config.PtrSize == 8 41642 // result: (ADDQ x y) 41643 for { 41644 _ = v.Args[1] 41645 x := v.Args[0] 41646 y := v.Args[1] 41647 if !(config.PtrSize == 8) { 41648 break 41649 } 41650 v.reset(OpAMD64ADDQ) 41651 v.AddArg(x) 41652 v.AddArg(y) 41653 return true 
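// Editorial note: config.PtrSize selects between standard amd64 (8-byte
// pointers, ADDQ) and the 32-bit-pointer amd64p32 variant (4-byte pointers,
// ADDL); the same two-way dispatch recurs for Addr, ConstNil, Convert, and
// the atomic pointer ops below. A sketch of the decision as a hypothetical
// helper (not part of this file):
//
//	func addPtrOp(ptrSize int64) Op {
//		if ptrSize == 8 {
//			return OpAMD64ADDQ
//		}
//		return OpAMD64ADDL // ptrSize == 4
//	}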
41654 } 41655 // match: (AddPtr x y) 41656 // cond: config.PtrSize == 4 41657 // result: (ADDL x y) 41658 for { 41659 _ = v.Args[1] 41660 x := v.Args[0] 41661 y := v.Args[1] 41662 if !(config.PtrSize == 4) { 41663 break 41664 } 41665 v.reset(OpAMD64ADDL) 41666 v.AddArg(x) 41667 v.AddArg(y) 41668 return true 41669 } 41670 return false 41671 } 41672 func rewriteValueAMD64_OpAddr_0(v *Value) bool { 41673 b := v.Block 41674 _ = b 41675 config := b.Func.Config 41676 _ = config 41677 // match: (Addr {sym} base) 41678 // cond: config.PtrSize == 8 41679 // result: (LEAQ {sym} base) 41680 for { 41681 sym := v.Aux 41682 base := v.Args[0] 41683 if !(config.PtrSize == 8) { 41684 break 41685 } 41686 v.reset(OpAMD64LEAQ) 41687 v.Aux = sym 41688 v.AddArg(base) 41689 return true 41690 } 41691 // match: (Addr {sym} base) 41692 // cond: config.PtrSize == 4 41693 // result: (LEAL {sym} base) 41694 for { 41695 sym := v.Aux 41696 base := v.Args[0] 41697 if !(config.PtrSize == 4) { 41698 break 41699 } 41700 v.reset(OpAMD64LEAL) 41701 v.Aux = sym 41702 v.AddArg(base) 41703 return true 41704 } 41705 return false 41706 } 41707 func rewriteValueAMD64_OpAnd16_0(v *Value) bool { 41708 // match: (And16 x y) 41709 // cond: 41710 // result: (ANDL x y) 41711 for { 41712 _ = v.Args[1] 41713 x := v.Args[0] 41714 y := v.Args[1] 41715 v.reset(OpAMD64ANDL) 41716 v.AddArg(x) 41717 v.AddArg(y) 41718 return true 41719 } 41720 } 41721 func rewriteValueAMD64_OpAnd32_0(v *Value) bool { 41722 // match: (And32 x y) 41723 // cond: 41724 // result: (ANDL x y) 41725 for { 41726 _ = v.Args[1] 41727 x := v.Args[0] 41728 y := v.Args[1] 41729 v.reset(OpAMD64ANDL) 41730 v.AddArg(x) 41731 v.AddArg(y) 41732 return true 41733 } 41734 } 41735 func rewriteValueAMD64_OpAnd64_0(v *Value) bool { 41736 // match: (And64 x y) 41737 // cond: 41738 // result: (ANDQ x y) 41739 for { 41740 _ = v.Args[1] 41741 x := v.Args[0] 41742 y := v.Args[1] 41743 v.reset(OpAMD64ANDQ) 41744 v.AddArg(x) 41745 v.AddArg(y) 41746 return true 41747 } 41748 } 41749 func rewriteValueAMD64_OpAnd8_0(v *Value) bool { 41750 // match: (And8 x y) 41751 // cond: 41752 // result: (ANDL x y) 41753 for { 41754 _ = v.Args[1] 41755 x := v.Args[0] 41756 y := v.Args[1] 41757 v.reset(OpAMD64ANDL) 41758 v.AddArg(x) 41759 v.AddArg(y) 41760 return true 41761 } 41762 } 41763 func rewriteValueAMD64_OpAndB_0(v *Value) bool { 41764 // match: (AndB x y) 41765 // cond: 41766 // result: (ANDL x y) 41767 for { 41768 _ = v.Args[1] 41769 x := v.Args[0] 41770 y := v.Args[1] 41771 v.reset(OpAMD64ANDL) 41772 v.AddArg(x) 41773 v.AddArg(y) 41774 return true 41775 } 41776 } 41777 func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool { 41778 b := v.Block 41779 _ = b 41780 typ := &b.Func.Config.Types 41781 _ = typ 41782 // match: (AtomicAdd32 ptr val mem) 41783 // cond: 41784 // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) 41785 for { 41786 _ = v.Args[2] 41787 ptr := v.Args[0] 41788 val := v.Args[1] 41789 mem := v.Args[2] 41790 v.reset(OpAMD64AddTupleFirst32) 41791 v.AddArg(val) 41792 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) 41793 v0.AddArg(val) 41794 v0.AddArg(ptr) 41795 v0.AddArg(mem) 41796 v.AddArg(v0) 41797 return true 41798 } 41799 } 41800 func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool { 41801 b := v.Block 41802 _ = b 41803 typ := &b.Func.Config.Types 41804 _ = typ 41805 // match: (AtomicAdd64 ptr val mem) 41806 // cond: 41807 // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) 41808 for { 41809 _ = v.Args[2] 41810 ptr := v.Args[0] 41811 val 
:= v.Args[1] 41812 mem := v.Args[2] 41813 v.reset(OpAMD64AddTupleFirst64) 41814 v.AddArg(val) 41815 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) 41816 v0.AddArg(val) 41817 v0.AddArg(ptr) 41818 v0.AddArg(mem) 41819 v.AddArg(v0) 41820 return true 41821 } 41822 } 41823 func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool { 41824 // match: (AtomicAnd8 ptr val mem) 41825 // cond: 41826 // result: (ANDBlock ptr val mem) 41827 for { 41828 _ = v.Args[2] 41829 ptr := v.Args[0] 41830 val := v.Args[1] 41831 mem := v.Args[2] 41832 v.reset(OpAMD64ANDBlock) 41833 v.AddArg(ptr) 41834 v.AddArg(val) 41835 v.AddArg(mem) 41836 return true 41837 } 41838 } 41839 func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool { 41840 // match: (AtomicCompareAndSwap32 ptr old new_ mem) 41841 // cond: 41842 // result: (CMPXCHGLlock ptr old new_ mem) 41843 for { 41844 _ = v.Args[3] 41845 ptr := v.Args[0] 41846 old := v.Args[1] 41847 new_ := v.Args[2] 41848 mem := v.Args[3] 41849 v.reset(OpAMD64CMPXCHGLlock) 41850 v.AddArg(ptr) 41851 v.AddArg(old) 41852 v.AddArg(new_) 41853 v.AddArg(mem) 41854 return true 41855 } 41856 } 41857 func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool { 41858 // match: (AtomicCompareAndSwap64 ptr old new_ mem) 41859 // cond: 41860 // result: (CMPXCHGQlock ptr old new_ mem) 41861 for { 41862 _ = v.Args[3] 41863 ptr := v.Args[0] 41864 old := v.Args[1] 41865 new_ := v.Args[2] 41866 mem := v.Args[3] 41867 v.reset(OpAMD64CMPXCHGQlock) 41868 v.AddArg(ptr) 41869 v.AddArg(old) 41870 v.AddArg(new_) 41871 v.AddArg(mem) 41872 return true 41873 } 41874 } 41875 func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool { 41876 // match: (AtomicExchange32 ptr val mem) 41877 // cond: 41878 // result: (XCHGL val ptr mem) 41879 for { 41880 _ = v.Args[2] 41881 ptr := v.Args[0] 41882 val := v.Args[1] 41883 mem := v.Args[2] 41884 v.reset(OpAMD64XCHGL) 41885 v.AddArg(val) 41886 v.AddArg(ptr) 41887 v.AddArg(mem) 41888 return true 41889 } 41890 } 41891 func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool { 41892 // match: (AtomicExchange64 ptr val mem) 41893 // cond: 41894 // result: (XCHGQ val ptr mem) 41895 for { 41896 _ = v.Args[2] 41897 ptr := v.Args[0] 41898 val := v.Args[1] 41899 mem := v.Args[2] 41900 v.reset(OpAMD64XCHGQ) 41901 v.AddArg(val) 41902 v.AddArg(ptr) 41903 v.AddArg(mem) 41904 return true 41905 } 41906 } 41907 func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool { 41908 // match: (AtomicLoad32 ptr mem) 41909 // cond: 41910 // result: (MOVLatomicload ptr mem) 41911 for { 41912 _ = v.Args[1] 41913 ptr := v.Args[0] 41914 mem := v.Args[1] 41915 v.reset(OpAMD64MOVLatomicload) 41916 v.AddArg(ptr) 41917 v.AddArg(mem) 41918 return true 41919 } 41920 } 41921 func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool { 41922 // match: (AtomicLoad64 ptr mem) 41923 // cond: 41924 // result: (MOVQatomicload ptr mem) 41925 for { 41926 _ = v.Args[1] 41927 ptr := v.Args[0] 41928 mem := v.Args[1] 41929 v.reset(OpAMD64MOVQatomicload) 41930 v.AddArg(ptr) 41931 v.AddArg(mem) 41932 return true 41933 } 41934 } 41935 func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool { 41936 b := v.Block 41937 _ = b 41938 config := b.Func.Config 41939 _ = config 41940 // match: (AtomicLoadPtr ptr mem) 41941 // cond: config.PtrSize == 8 41942 // result: (MOVQatomicload ptr mem) 41943 for { 41944 _ = v.Args[1] 41945 ptr := v.Args[0] 41946 mem := v.Args[1] 41947 if !(config.PtrSize == 8) { 41948 break 41949 } 41950 v.reset(OpAMD64MOVQatomicload) 41951 v.AddArg(ptr) 41952 
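// Editorial note: on amd64 an atomic load needs no fence or LOCK prefix,
// since x86-TSO already gives loads acquire semantics; AtomicLoad32/64 and
// AtomicLoadPtr therefore lower to plain MOVLatomicload/MOVQatomicload,
// which are "atomic" only in that the mem argument keeps them ordered
// against other memory operations. Source shape that reaches these rules:
//
//	import "sync/atomic"
//
//	func load(p *uint64) uint64 {
//		return atomic.LoadUint64(p) // a single MOVQ on amd64
//	}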
v.AddArg(mem) 41953 return true 41954 } 41955 // match: (AtomicLoadPtr ptr mem) 41956 // cond: config.PtrSize == 4 41957 // result: (MOVLatomicload ptr mem) 41958 for { 41959 _ = v.Args[1] 41960 ptr := v.Args[0] 41961 mem := v.Args[1] 41962 if !(config.PtrSize == 4) { 41963 break 41964 } 41965 v.reset(OpAMD64MOVLatomicload) 41966 v.AddArg(ptr) 41967 v.AddArg(mem) 41968 return true 41969 } 41970 return false 41971 } 41972 func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool { 41973 // match: (AtomicOr8 ptr val mem) 41974 // cond: 41975 // result: (ORBlock ptr val mem) 41976 for { 41977 _ = v.Args[2] 41978 ptr := v.Args[0] 41979 val := v.Args[1] 41980 mem := v.Args[2] 41981 v.reset(OpAMD64ORBlock) 41982 v.AddArg(ptr) 41983 v.AddArg(val) 41984 v.AddArg(mem) 41985 return true 41986 } 41987 } 41988 func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool { 41989 b := v.Block 41990 _ = b 41991 typ := &b.Func.Config.Types 41992 _ = typ 41993 // match: (AtomicStore32 ptr val mem) 41994 // cond: 41995 // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem)) 41996 for { 41997 _ = v.Args[2] 41998 ptr := v.Args[0] 41999 val := v.Args[1] 42000 mem := v.Args[2] 42001 v.reset(OpSelect1) 42002 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) 42003 v0.AddArg(val) 42004 v0.AddArg(ptr) 42005 v0.AddArg(mem) 42006 v.AddArg(v0) 42007 return true 42008 } 42009 } 42010 func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool { 42011 b := v.Block 42012 _ = b 42013 typ := &b.Func.Config.Types 42014 _ = typ 42015 // match: (AtomicStore64 ptr val mem) 42016 // cond: 42017 // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem)) 42018 for { 42019 _ = v.Args[2] 42020 ptr := v.Args[0] 42021 val := v.Args[1] 42022 mem := v.Args[2] 42023 v.reset(OpSelect1) 42024 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) 42025 v0.AddArg(val) 42026 v0.AddArg(ptr) 42027 v0.AddArg(mem) 42028 v.AddArg(v0) 42029 return true 42030 } 42031 } 42032 func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool { 42033 b := v.Block 42034 _ = b 42035 config := b.Func.Config 42036 _ = config 42037 typ := &b.Func.Config.Types 42038 _ = typ 42039 // match: (AtomicStorePtrNoWB ptr val mem) 42040 // cond: config.PtrSize == 8 42041 // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) 42042 for { 42043 _ = v.Args[2] 42044 ptr := v.Args[0] 42045 val := v.Args[1] 42046 mem := v.Args[2] 42047 if !(config.PtrSize == 8) { 42048 break 42049 } 42050 v.reset(OpSelect1) 42051 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) 42052 v0.AddArg(val) 42053 v0.AddArg(ptr) 42054 v0.AddArg(mem) 42055 v.AddArg(v0) 42056 return true 42057 } 42058 // match: (AtomicStorePtrNoWB ptr val mem) 42059 // cond: config.PtrSize == 4 42060 // result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem)) 42061 for { 42062 _ = v.Args[2] 42063 ptr := v.Args[0] 42064 val := v.Args[1] 42065 mem := v.Args[2] 42066 if !(config.PtrSize == 4) { 42067 break 42068 } 42069 v.reset(OpSelect1) 42070 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem)) 42071 v0.AddArg(val) 42072 v0.AddArg(ptr) 42073 v0.AddArg(mem) 42074 v.AddArg(v0) 42075 return true 42076 } 42077 return false 42078 } 42079 func rewriteValueAMD64_OpAvg64u_0(v *Value) bool { 42080 // match: (Avg64u x y) 42081 // cond: 42082 // result: (AVGQU x y) 42083 for { 42084 _ = v.Args[1] 42085 x := v.Args[0] 
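// Editorial sketch: Avg64u must compute (x+y)/2 without losing the carry of
// the 64-bit addition; AVGQU is a pseudo-op that the backend can expand
// using the carry flag (e.g. ADDQ, then RCRQ $1 to shift the carry back
// in). An overflow-free formulation of the same function in portable Go:
//
//	func avg64u(x, y uint64) uint64 {
//		return (x & y) + (x^y)>>1 // floor((x+y)/2), no 65-bit overflow
//	}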
42086 y := v.Args[1] 42087 v.reset(OpAMD64AVGQU) 42088 v.AddArg(x) 42089 v.AddArg(y) 42090 return true 42091 } 42092 } 42093 func rewriteValueAMD64_OpBitLen32_0(v *Value) bool { 42094 b := v.Block 42095 _ = b 42096 typ := &b.Func.Config.Types 42097 _ = typ 42098 // match: (BitLen32 x) 42099 // cond: 42100 // result: (BitLen64 (MOVLQZX <typ.UInt64> x)) 42101 for { 42102 x := v.Args[0] 42103 v.reset(OpBitLen64) 42104 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) 42105 v0.AddArg(x) 42106 v.AddArg(v0) 42107 return true 42108 } 42109 } 42110 func rewriteValueAMD64_OpBitLen64_0(v *Value) bool { 42111 b := v.Block 42112 _ = b 42113 typ := &b.Func.Config.Types 42114 _ = typ 42115 // match: (BitLen64 <t> x) 42116 // cond: 42117 // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x)))) 42118 for { 42119 t := v.Type 42120 x := v.Args[0] 42121 v.reset(OpAMD64ADDQconst) 42122 v.AuxInt = 1 42123 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) 42124 v1 := b.NewValue0(v.Pos, OpSelect0, t) 42125 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42126 v2.AddArg(x) 42127 v1.AddArg(v2) 42128 v0.AddArg(v1) 42129 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 42130 v3.AuxInt = -1 42131 v0.AddArg(v3) 42132 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 42133 v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42134 v5.AddArg(x) 42135 v4.AddArg(v5) 42136 v0.AddArg(v4) 42137 v.AddArg(v0) 42138 return true 42139 } 42140 } 42141 func rewriteValueAMD64_OpBswap32_0(v *Value) bool { 42142 // match: (Bswap32 x) 42143 // cond: 42144 // result: (BSWAPL x) 42145 for { 42146 x := v.Args[0] 42147 v.reset(OpAMD64BSWAPL) 42148 v.AddArg(x) 42149 return true 42150 } 42151 } 42152 func rewriteValueAMD64_OpBswap64_0(v *Value) bool { 42153 // match: (Bswap64 x) 42154 // cond: 42155 // result: (BSWAPQ x) 42156 for { 42157 x := v.Args[0] 42158 v.reset(OpAMD64BSWAPQ) 42159 v.AddArg(x) 42160 return true 42161 } 42162 } 42163 func rewriteValueAMD64_OpCeil_0(v *Value) bool { 42164 // match: (Ceil x) 42165 // cond: 42166 // result: (ROUNDSD [2] x) 42167 for { 42168 x := v.Args[0] 42169 v.reset(OpAMD64ROUNDSD) 42170 v.AuxInt = 2 42171 v.AddArg(x) 42172 return true 42173 } 42174 } 42175 func rewriteValueAMD64_OpClosureCall_0(v *Value) bool { 42176 // match: (ClosureCall [argwid] entry closure mem) 42177 // cond: 42178 // result: (CALLclosure [argwid] entry closure mem) 42179 for { 42180 argwid := v.AuxInt 42181 _ = v.Args[2] 42182 entry := v.Args[0] 42183 closure := v.Args[1] 42184 mem := v.Args[2] 42185 v.reset(OpAMD64CALLclosure) 42186 v.AuxInt = argwid 42187 v.AddArg(entry) 42188 v.AddArg(closure) 42189 v.AddArg(mem) 42190 return true 42191 } 42192 } 42193 func rewriteValueAMD64_OpCom16_0(v *Value) bool { 42194 // match: (Com16 x) 42195 // cond: 42196 // result: (NOTL x) 42197 for { 42198 x := v.Args[0] 42199 v.reset(OpAMD64NOTL) 42200 v.AddArg(x) 42201 return true 42202 } 42203 } 42204 func rewriteValueAMD64_OpCom32_0(v *Value) bool { 42205 // match: (Com32 x) 42206 // cond: 42207 // result: (NOTL x) 42208 for { 42209 x := v.Args[0] 42210 v.reset(OpAMD64NOTL) 42211 v.AddArg(x) 42212 return true 42213 } 42214 } 42215 func rewriteValueAMD64_OpCom64_0(v *Value) bool { 42216 // match: (Com64 x) 42217 // cond: 42218 // result: (NOTQ x) 42219 for { 42220 x := v.Args[0] 42221 v.reset(OpAMD64NOTQ) 42222 v.AddArg(x) 42223 return true 42224 } 42225 } 42226 func rewriteValueAMD64_OpCom8_0(v *Value) bool { 42227 // 
match: (Com8 x) 42228 // cond: 42229 // result: (NOTL x) 42230 for { 42231 x := v.Args[0] 42232 v.reset(OpAMD64NOTL) 42233 v.AddArg(x) 42234 return true 42235 } 42236 } 42237 func rewriteValueAMD64_OpConst16_0(v *Value) bool { 42238 // match: (Const16 [val]) 42239 // cond: 42240 // result: (MOVLconst [val]) 42241 for { 42242 val := v.AuxInt 42243 v.reset(OpAMD64MOVLconst) 42244 v.AuxInt = val 42245 return true 42246 } 42247 } 42248 func rewriteValueAMD64_OpConst32_0(v *Value) bool { 42249 // match: (Const32 [val]) 42250 // cond: 42251 // result: (MOVLconst [val]) 42252 for { 42253 val := v.AuxInt 42254 v.reset(OpAMD64MOVLconst) 42255 v.AuxInt = val 42256 return true 42257 } 42258 } 42259 func rewriteValueAMD64_OpConst32F_0(v *Value) bool { 42260 // match: (Const32F [val]) 42261 // cond: 42262 // result: (MOVSSconst [val]) 42263 for { 42264 val := v.AuxInt 42265 v.reset(OpAMD64MOVSSconst) 42266 v.AuxInt = val 42267 return true 42268 } 42269 } 42270 func rewriteValueAMD64_OpConst64_0(v *Value) bool { 42271 // match: (Const64 [val]) 42272 // cond: 42273 // result: (MOVQconst [val]) 42274 for { 42275 val := v.AuxInt 42276 v.reset(OpAMD64MOVQconst) 42277 v.AuxInt = val 42278 return true 42279 } 42280 } 42281 func rewriteValueAMD64_OpConst64F_0(v *Value) bool { 42282 // match: (Const64F [val]) 42283 // cond: 42284 // result: (MOVSDconst [val]) 42285 for { 42286 val := v.AuxInt 42287 v.reset(OpAMD64MOVSDconst) 42288 v.AuxInt = val 42289 return true 42290 } 42291 } 42292 func rewriteValueAMD64_OpConst8_0(v *Value) bool { 42293 // match: (Const8 [val]) 42294 // cond: 42295 // result: (MOVLconst [val]) 42296 for { 42297 val := v.AuxInt 42298 v.reset(OpAMD64MOVLconst) 42299 v.AuxInt = val 42300 return true 42301 } 42302 } 42303 func rewriteValueAMD64_OpConstBool_0(v *Value) bool { 42304 // match: (ConstBool [b]) 42305 // cond: 42306 // result: (MOVLconst [b]) 42307 for { 42308 b := v.AuxInt 42309 v.reset(OpAMD64MOVLconst) 42310 v.AuxInt = b 42311 return true 42312 } 42313 } 42314 func rewriteValueAMD64_OpConstNil_0(v *Value) bool { 42315 b := v.Block 42316 _ = b 42317 config := b.Func.Config 42318 _ = config 42319 // match: (ConstNil) 42320 // cond: config.PtrSize == 8 42321 // result: (MOVQconst [0]) 42322 for { 42323 if !(config.PtrSize == 8) { 42324 break 42325 } 42326 v.reset(OpAMD64MOVQconst) 42327 v.AuxInt = 0 42328 return true 42329 } 42330 // match: (ConstNil) 42331 // cond: config.PtrSize == 4 42332 // result: (MOVLconst [0]) 42333 for { 42334 if !(config.PtrSize == 4) { 42335 break 42336 } 42337 v.reset(OpAMD64MOVLconst) 42338 v.AuxInt = 0 42339 return true 42340 } 42341 return false 42342 } 42343 func rewriteValueAMD64_OpConvert_0(v *Value) bool { 42344 b := v.Block 42345 _ = b 42346 config := b.Func.Config 42347 _ = config 42348 // match: (Convert <t> x mem) 42349 // cond: config.PtrSize == 8 42350 // result: (MOVQconvert <t> x mem) 42351 for { 42352 t := v.Type 42353 _ = v.Args[1] 42354 x := v.Args[0] 42355 mem := v.Args[1] 42356 if !(config.PtrSize == 8) { 42357 break 42358 } 42359 v.reset(OpAMD64MOVQconvert) 42360 v.Type = t 42361 v.AddArg(x) 42362 v.AddArg(mem) 42363 return true 42364 } 42365 // match: (Convert <t> x mem) 42366 // cond: config.PtrSize == 4 42367 // result: (MOVLconvert <t> x mem) 42368 for { 42369 t := v.Type 42370 _ = v.Args[1] 42371 x := v.Args[0] 42372 mem := v.Args[1] 42373 if !(config.PtrSize == 4) { 42374 break 42375 } 42376 v.reset(OpAMD64MOVLconvert) 42377 v.Type = t 42378 v.AddArg(x) 42379 v.AddArg(mem) 42380 return true 42381 } 42382 return false 42383 } 
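// Editorial note on the Ctz32 lowering that follows: BSF leaves its output
// undefined when the input is zero, so rather than guarding with a CMOV (as
// Ctz64 below must), Ctz32 widens to 64 bits and ORs in 1<<32. The forced
// bit guarantees BSFQ a nonzero input, leaves the low 32 bits untouched,
// and yields exactly 32 when x == 0, matching the math/bits contract:
//
//	import "math/bits"
//
//	func ctz(x uint32) int {
//		return bits.TrailingZeros32(x) // 32 for x == 0
//	}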
42384 func rewriteValueAMD64_OpCtz32_0(v *Value) bool { 42385 b := v.Block 42386 _ = b 42387 typ := &b.Func.Config.Types 42388 _ = typ 42389 // match: (Ctz32 x) 42390 // cond: 42391 // result: (Select0 (BSFQ (ORQ <typ.UInt64> (MOVQconst [1<<32]) x))) 42392 for { 42393 x := v.Args[0] 42394 v.reset(OpSelect0) 42395 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42396 v1 := b.NewValue0(v.Pos, OpAMD64ORQ, typ.UInt64) 42397 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 42398 v2.AuxInt = 1 << 32 42399 v1.AddArg(v2) 42400 v1.AddArg(x) 42401 v0.AddArg(v1) 42402 v.AddArg(v0) 42403 return true 42404 } 42405 } 42406 func rewriteValueAMD64_OpCtz64_0(v *Value) bool { 42407 b := v.Block 42408 _ = b 42409 typ := &b.Func.Config.Types 42410 _ = typ 42411 // match: (Ctz64 <t> x) 42412 // cond: 42413 // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x))) 42414 for { 42415 t := v.Type 42416 x := v.Args[0] 42417 v.reset(OpAMD64CMOVQEQ) 42418 v0 := b.NewValue0(v.Pos, OpSelect0, t) 42419 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42420 v1.AddArg(x) 42421 v0.AddArg(v1) 42422 v.AddArg(v0) 42423 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) 42424 v2.AuxInt = 64 42425 v.AddArg(v2) 42426 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) 42427 v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) 42428 v4.AddArg(x) 42429 v3.AddArg(v4) 42430 v.AddArg(v3) 42431 return true 42432 } 42433 } 42434 func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool { 42435 // match: (Cvt32Fto32 x) 42436 // cond: 42437 // result: (CVTTSS2SL x) 42438 for { 42439 x := v.Args[0] 42440 v.reset(OpAMD64CVTTSS2SL) 42441 v.AddArg(x) 42442 return true 42443 } 42444 } 42445 func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool { 42446 // match: (Cvt32Fto64 x) 42447 // cond: 42448 // result: (CVTTSS2SQ x) 42449 for { 42450 x := v.Args[0] 42451 v.reset(OpAMD64CVTTSS2SQ) 42452 v.AddArg(x) 42453 return true 42454 } 42455 } 42456 func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool { 42457 // match: (Cvt32Fto64F x) 42458 // cond: 42459 // result: (CVTSS2SD x) 42460 for { 42461 x := v.Args[0] 42462 v.reset(OpAMD64CVTSS2SD) 42463 v.AddArg(x) 42464 return true 42465 } 42466 } 42467 func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool { 42468 // match: (Cvt32to32F x) 42469 // cond: 42470 // result: (CVTSL2SS x) 42471 for { 42472 x := v.Args[0] 42473 v.reset(OpAMD64CVTSL2SS) 42474 v.AddArg(x) 42475 return true 42476 } 42477 } 42478 func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool { 42479 // match: (Cvt32to64F x) 42480 // cond: 42481 // result: (CVTSL2SD x) 42482 for { 42483 x := v.Args[0] 42484 v.reset(OpAMD64CVTSL2SD) 42485 v.AddArg(x) 42486 return true 42487 } 42488 } 42489 func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool { 42490 // match: (Cvt64Fto32 x) 42491 // cond: 42492 // result: (CVTTSD2SL x) 42493 for { 42494 x := v.Args[0] 42495 v.reset(OpAMD64CVTTSD2SL) 42496 v.AddArg(x) 42497 return true 42498 } 42499 } 42500 func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool { 42501 // match: (Cvt64Fto32F x) 42502 // cond: 42503 // result: (CVTSD2SS x) 42504 for { 42505 x := v.Args[0] 42506 v.reset(OpAMD64CVTSD2SS) 42507 v.AddArg(x) 42508 return true 42509 } 42510 } 42511 func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool { 42512 // match: (Cvt64Fto64 x) 42513 // cond: 42514 // result: (CVTTSD2SQ x) 42515 for { 42516 x := v.Args[0] 42517 v.reset(OpAMD64CVTTSD2SQ) 42518 v.AddArg(x) 42519 
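// Editorial note: all the float-to-int Cvt rules here pick the CVTT*
// ("truncating") conversion forms, because Go's conversions discard the
// fractional part toward zero regardless of the current rounding mode:
//
//	func toInt(f float64) int64 {
//		return int64(f) // CVTTSD2SQ: int64(1.9) == 1, int64(-1.9) == -1
//	}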
return true 42520 } 42521 } 42522 func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool { 42523 // match: (Cvt64to32F x) 42524 // cond: 42525 // result: (CVTSQ2SS x) 42526 for { 42527 x := v.Args[0] 42528 v.reset(OpAMD64CVTSQ2SS) 42529 v.AddArg(x) 42530 return true 42531 } 42532 } 42533 func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool { 42534 // match: (Cvt64to64F x) 42535 // cond: 42536 // result: (CVTSQ2SD x) 42537 for { 42538 x := v.Args[0] 42539 v.reset(OpAMD64CVTSQ2SD) 42540 v.AddArg(x) 42541 return true 42542 } 42543 } 42544 func rewriteValueAMD64_OpDiv128u_0(v *Value) bool { 42545 // match: (Div128u xhi xlo y) 42546 // cond: 42547 // result: (DIVQU2 xhi xlo y) 42548 for { 42549 _ = v.Args[2] 42550 xhi := v.Args[0] 42551 xlo := v.Args[1] 42552 y := v.Args[2] 42553 v.reset(OpAMD64DIVQU2) 42554 v.AddArg(xhi) 42555 v.AddArg(xlo) 42556 v.AddArg(y) 42557 return true 42558 } 42559 } 42560 func rewriteValueAMD64_OpDiv16_0(v *Value) bool { 42561 b := v.Block 42562 _ = b 42563 typ := &b.Func.Config.Types 42564 _ = typ 42565 // match: (Div16 x y) 42566 // cond: 42567 // result: (Select0 (DIVW x y)) 42568 for { 42569 _ = v.Args[1] 42570 x := v.Args[0] 42571 y := v.Args[1] 42572 v.reset(OpSelect0) 42573 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 42574 v0.AddArg(x) 42575 v0.AddArg(y) 42576 v.AddArg(v0) 42577 return true 42578 } 42579 } 42580 func rewriteValueAMD64_OpDiv16u_0(v *Value) bool { 42581 b := v.Block 42582 _ = b 42583 typ := &b.Func.Config.Types 42584 _ = typ 42585 // match: (Div16u x y) 42586 // cond: 42587 // result: (Select0 (DIVWU x y)) 42588 for { 42589 _ = v.Args[1] 42590 x := v.Args[0] 42591 y := v.Args[1] 42592 v.reset(OpSelect0) 42593 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 42594 v0.AddArg(x) 42595 v0.AddArg(y) 42596 v.AddArg(v0) 42597 return true 42598 } 42599 } 42600 func rewriteValueAMD64_OpDiv32_0(v *Value) bool { 42601 b := v.Block 42602 _ = b 42603 typ := &b.Func.Config.Types 42604 _ = typ 42605 // match: (Div32 x y) 42606 // cond: 42607 // result: (Select0 (DIVL x y)) 42608 for { 42609 _ = v.Args[1] 42610 x := v.Args[0] 42611 y := v.Args[1] 42612 v.reset(OpSelect0) 42613 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 42614 v0.AddArg(x) 42615 v0.AddArg(y) 42616 v.AddArg(v0) 42617 return true 42618 } 42619 } 42620 func rewriteValueAMD64_OpDiv32F_0(v *Value) bool { 42621 // match: (Div32F x y) 42622 // cond: 42623 // result: (DIVSS x y) 42624 for { 42625 _ = v.Args[1] 42626 x := v.Args[0] 42627 y := v.Args[1] 42628 v.reset(OpAMD64DIVSS) 42629 v.AddArg(x) 42630 v.AddArg(y) 42631 return true 42632 } 42633 } 42634 func rewriteValueAMD64_OpDiv32u_0(v *Value) bool { 42635 b := v.Block 42636 _ = b 42637 typ := &b.Func.Config.Types 42638 _ = typ 42639 // match: (Div32u x y) 42640 // cond: 42641 // result: (Select0 (DIVLU x y)) 42642 for { 42643 _ = v.Args[1] 42644 x := v.Args[0] 42645 y := v.Args[1] 42646 v.reset(OpSelect0) 42647 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 42648 v0.AddArg(x) 42649 v0.AddArg(y) 42650 v.AddArg(v0) 42651 return true 42652 } 42653 } 42654 func rewriteValueAMD64_OpDiv64_0(v *Value) bool { 42655 b := v.Block 42656 _ = b 42657 typ := &b.Func.Config.Types 42658 _ = typ 42659 // match: (Div64 x y) 42660 // cond: 42661 // result: (Select0 (DIVQ x y)) 42662 for { 42663 _ = v.Args[1] 42664 x := v.Args[0] 42665 y := v.Args[1] 42666 v.reset(OpSelect0) 42667 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, 
typ.Int64)) 42668 v0.AddArg(x) 42669 v0.AddArg(y) 42670 v.AddArg(v0) 42671 return true 42672 } 42673 } 42674 func rewriteValueAMD64_OpDiv64F_0(v *Value) bool { 42675 // match: (Div64F x y) 42676 // cond: 42677 // result: (DIVSD x y) 42678 for { 42679 _ = v.Args[1] 42680 x := v.Args[0] 42681 y := v.Args[1] 42682 v.reset(OpAMD64DIVSD) 42683 v.AddArg(x) 42684 v.AddArg(y) 42685 return true 42686 } 42687 } 42688 func rewriteValueAMD64_OpDiv64u_0(v *Value) bool { 42689 b := v.Block 42690 _ = b 42691 typ := &b.Func.Config.Types 42692 _ = typ 42693 // match: (Div64u x y) 42694 // cond: 42695 // result: (Select0 (DIVQU x y)) 42696 for { 42697 _ = v.Args[1] 42698 x := v.Args[0] 42699 y := v.Args[1] 42700 v.reset(OpSelect0) 42701 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 42702 v0.AddArg(x) 42703 v0.AddArg(y) 42704 v.AddArg(v0) 42705 return true 42706 } 42707 } 42708 func rewriteValueAMD64_OpDiv8_0(v *Value) bool { 42709 b := v.Block 42710 _ = b 42711 typ := &b.Func.Config.Types 42712 _ = typ 42713 // match: (Div8 x y) 42714 // cond: 42715 // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 42716 for { 42717 _ = v.Args[1] 42718 x := v.Args[0] 42719 y := v.Args[1] 42720 v.reset(OpSelect0) 42721 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 42722 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 42723 v1.AddArg(x) 42724 v0.AddArg(v1) 42725 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 42726 v2.AddArg(y) 42727 v0.AddArg(v2) 42728 v.AddArg(v0) 42729 return true 42730 } 42731 } 42732 func rewriteValueAMD64_OpDiv8u_0(v *Value) bool { 42733 b := v.Block 42734 _ = b 42735 typ := &b.Func.Config.Types 42736 _ = typ 42737 // match: (Div8u x y) 42738 // cond: 42739 // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 42740 for { 42741 _ = v.Args[1] 42742 x := v.Args[0] 42743 y := v.Args[1] 42744 v.reset(OpSelect0) 42745 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 42746 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 42747 v1.AddArg(x) 42748 v0.AddArg(v1) 42749 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 42750 v2.AddArg(y) 42751 v0.AddArg(v2) 42752 v.AddArg(v0) 42753 return true 42754 } 42755 } 42756 func rewriteValueAMD64_OpEq16_0(v *Value) bool { 42757 b := v.Block 42758 _ = b 42759 // match: (Eq16 x y) 42760 // cond: 42761 // result: (SETEQ (CMPW x y)) 42762 for { 42763 _ = v.Args[1] 42764 x := v.Args[0] 42765 y := v.Args[1] 42766 v.reset(OpAMD64SETEQ) 42767 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 42768 v0.AddArg(x) 42769 v0.AddArg(y) 42770 v.AddArg(v0) 42771 return true 42772 } 42773 } 42774 func rewriteValueAMD64_OpEq32_0(v *Value) bool { 42775 b := v.Block 42776 _ = b 42777 // match: (Eq32 x y) 42778 // cond: 42779 // result: (SETEQ (CMPL x y)) 42780 for { 42781 _ = v.Args[1] 42782 x := v.Args[0] 42783 y := v.Args[1] 42784 v.reset(OpAMD64SETEQ) 42785 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 42786 v0.AddArg(x) 42787 v0.AddArg(y) 42788 v.AddArg(v0) 42789 return true 42790 } 42791 } 42792 func rewriteValueAMD64_OpEq32F_0(v *Value) bool { 42793 b := v.Block 42794 _ = b 42795 // match: (Eq32F x y) 42796 // cond: 42797 // result: (SETEQF (UCOMISS x y)) 42798 for { 42799 _ = v.Args[1] 42800 x := v.Args[0] 42801 y := v.Args[1] 42802 v.reset(OpAMD64SETEQF) 42803 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 42804 v0.AddArg(x) 42805 v0.AddArg(y) 42806 v.AddArg(v0) 42807 return true 42808 } 42809 } 42810 func rewriteValueAMD64_OpEq64_0(v 
*Value) bool { 42811 b := v.Block 42812 _ = b 42813 // match: (Eq64 x y) 42814 // cond: 42815 // result: (SETEQ (CMPQ x y)) 42816 for { 42817 _ = v.Args[1] 42818 x := v.Args[0] 42819 y := v.Args[1] 42820 v.reset(OpAMD64SETEQ) 42821 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 42822 v0.AddArg(x) 42823 v0.AddArg(y) 42824 v.AddArg(v0) 42825 return true 42826 } 42827 } 42828 func rewriteValueAMD64_OpEq64F_0(v *Value) bool { 42829 b := v.Block 42830 _ = b 42831 // match: (Eq64F x y) 42832 // cond: 42833 // result: (SETEQF (UCOMISD x y)) 42834 for { 42835 _ = v.Args[1] 42836 x := v.Args[0] 42837 y := v.Args[1] 42838 v.reset(OpAMD64SETEQF) 42839 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 42840 v0.AddArg(x) 42841 v0.AddArg(y) 42842 v.AddArg(v0) 42843 return true 42844 } 42845 } 42846 func rewriteValueAMD64_OpEq8_0(v *Value) bool { 42847 b := v.Block 42848 _ = b 42849 // match: (Eq8 x y) 42850 // cond: 42851 // result: (SETEQ (CMPB x y)) 42852 for { 42853 _ = v.Args[1] 42854 x := v.Args[0] 42855 y := v.Args[1] 42856 v.reset(OpAMD64SETEQ) 42857 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 42858 v0.AddArg(x) 42859 v0.AddArg(y) 42860 v.AddArg(v0) 42861 return true 42862 } 42863 } 42864 func rewriteValueAMD64_OpEqB_0(v *Value) bool { 42865 b := v.Block 42866 _ = b 42867 // match: (EqB x y) 42868 // cond: 42869 // result: (SETEQ (CMPB x y)) 42870 for { 42871 _ = v.Args[1] 42872 x := v.Args[0] 42873 y := v.Args[1] 42874 v.reset(OpAMD64SETEQ) 42875 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 42876 v0.AddArg(x) 42877 v0.AddArg(y) 42878 v.AddArg(v0) 42879 return true 42880 } 42881 } 42882 func rewriteValueAMD64_OpEqPtr_0(v *Value) bool { 42883 b := v.Block 42884 _ = b 42885 config := b.Func.Config 42886 _ = config 42887 // match: (EqPtr x y) 42888 // cond: config.PtrSize == 8 42889 // result: (SETEQ (CMPQ x y)) 42890 for { 42891 _ = v.Args[1] 42892 x := v.Args[0] 42893 y := v.Args[1] 42894 if !(config.PtrSize == 8) { 42895 break 42896 } 42897 v.reset(OpAMD64SETEQ) 42898 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 42899 v0.AddArg(x) 42900 v0.AddArg(y) 42901 v.AddArg(v0) 42902 return true 42903 } 42904 // match: (EqPtr x y) 42905 // cond: config.PtrSize == 4 42906 // result: (SETEQ (CMPL x y)) 42907 for { 42908 _ = v.Args[1] 42909 x := v.Args[0] 42910 y := v.Args[1] 42911 if !(config.PtrSize == 4) { 42912 break 42913 } 42914 v.reset(OpAMD64SETEQ) 42915 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 42916 v0.AddArg(x) 42917 v0.AddArg(y) 42918 v.AddArg(v0) 42919 return true 42920 } 42921 return false 42922 } 42923 func rewriteValueAMD64_OpFloor_0(v *Value) bool { 42924 // match: (Floor x) 42925 // cond: 42926 // result: (ROUNDSD [1] x) 42927 for { 42928 x := v.Args[0] 42929 v.reset(OpAMD64ROUNDSD) 42930 v.AuxInt = 1 42931 v.AddArg(x) 42932 return true 42933 } 42934 } 42935 func rewriteValueAMD64_OpGeq16_0(v *Value) bool { 42936 b := v.Block 42937 _ = b 42938 // match: (Geq16 x y) 42939 // cond: 42940 // result: (SETGE (CMPW x y)) 42941 for { 42942 _ = v.Args[1] 42943 x := v.Args[0] 42944 y := v.Args[1] 42945 v.reset(OpAMD64SETGE) 42946 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 42947 v0.AddArg(x) 42948 v0.AddArg(y) 42949 v.AddArg(v0) 42950 return true 42951 } 42952 } 42953 func rewriteValueAMD64_OpGeq16U_0(v *Value) bool { 42954 b := v.Block 42955 _ = b 42956 // match: (Geq16U x y) 42957 // cond: 42958 // result: (SETAE (CMPW x y)) 42959 for { 42960 _ = v.Args[1] 42961 x := v.Args[0] 42962 y := v.Args[1] 42963 v.reset(OpAMD64SETAE) 
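// The integer comparisons in this stretch of rules all lower to one
// flag-producing CMPx feeding a SETcc, with the condition code carrying the
// signedness:
//
//	signed:    Less* → SETL   Leq* → SETLE   Greater* → SETG   Geq* → SETGE
//	unsigned:  Less*U → SETB  Leq*U → SETBE  Greater*U → SETA  Geq*U → SETAE
//
// Float equality uses UCOMISS/UCOMISD with the SETEQF/SETNEF variants, which
// leave the code generator room to fold in the parity (unordered/NaN) flag;
// EqPtr picks CMPQ or CMPL from config.PtrSize. Floor's ROUNDSD AuxInt of 1
// selects the round-toward-negative-infinity mode of the SSE4.1 instruction.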
42964 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 42965 v0.AddArg(x) 42966 v0.AddArg(y) 42967 v.AddArg(v0) 42968 return true 42969 } 42970 } 42971 func rewriteValueAMD64_OpGeq32_0(v *Value) bool { 42972 b := v.Block 42973 _ = b 42974 // match: (Geq32 x y) 42975 // cond: 42976 // result: (SETGE (CMPL x y)) 42977 for { 42978 _ = v.Args[1] 42979 x := v.Args[0] 42980 y := v.Args[1] 42981 v.reset(OpAMD64SETGE) 42982 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 42983 v0.AddArg(x) 42984 v0.AddArg(y) 42985 v.AddArg(v0) 42986 return true 42987 } 42988 } 42989 func rewriteValueAMD64_OpGeq32F_0(v *Value) bool { 42990 b := v.Block 42991 _ = b 42992 // match: (Geq32F x y) 42993 // cond: 42994 // result: (SETGEF (UCOMISS x y)) 42995 for { 42996 _ = v.Args[1] 42997 x := v.Args[0] 42998 y := v.Args[1] 42999 v.reset(OpAMD64SETGEF) 43000 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 43001 v0.AddArg(x) 43002 v0.AddArg(y) 43003 v.AddArg(v0) 43004 return true 43005 } 43006 } 43007 func rewriteValueAMD64_OpGeq32U_0(v *Value) bool { 43008 b := v.Block 43009 _ = b 43010 // match: (Geq32U x y) 43011 // cond: 43012 // result: (SETAE (CMPL x y)) 43013 for { 43014 _ = v.Args[1] 43015 x := v.Args[0] 43016 y := v.Args[1] 43017 v.reset(OpAMD64SETAE) 43018 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43019 v0.AddArg(x) 43020 v0.AddArg(y) 43021 v.AddArg(v0) 43022 return true 43023 } 43024 } 43025 func rewriteValueAMD64_OpGeq64_0(v *Value) bool { 43026 b := v.Block 43027 _ = b 43028 // match: (Geq64 x y) 43029 // cond: 43030 // result: (SETGE (CMPQ x y)) 43031 for { 43032 _ = v.Args[1] 43033 x := v.Args[0] 43034 y := v.Args[1] 43035 v.reset(OpAMD64SETGE) 43036 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43037 v0.AddArg(x) 43038 v0.AddArg(y) 43039 v.AddArg(v0) 43040 return true 43041 } 43042 } 43043 func rewriteValueAMD64_OpGeq64F_0(v *Value) bool { 43044 b := v.Block 43045 _ = b 43046 // match: (Geq64F x y) 43047 // cond: 43048 // result: (SETGEF (UCOMISD x y)) 43049 for { 43050 _ = v.Args[1] 43051 x := v.Args[0] 43052 y := v.Args[1] 43053 v.reset(OpAMD64SETGEF) 43054 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 43055 v0.AddArg(x) 43056 v0.AddArg(y) 43057 v.AddArg(v0) 43058 return true 43059 } 43060 } 43061 func rewriteValueAMD64_OpGeq64U_0(v *Value) bool { 43062 b := v.Block 43063 _ = b 43064 // match: (Geq64U x y) 43065 // cond: 43066 // result: (SETAE (CMPQ x y)) 43067 for { 43068 _ = v.Args[1] 43069 x := v.Args[0] 43070 y := v.Args[1] 43071 v.reset(OpAMD64SETAE) 43072 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43073 v0.AddArg(x) 43074 v0.AddArg(y) 43075 v.AddArg(v0) 43076 return true 43077 } 43078 } 43079 func rewriteValueAMD64_OpGeq8_0(v *Value) bool { 43080 b := v.Block 43081 _ = b 43082 // match: (Geq8 x y) 43083 // cond: 43084 // result: (SETGE (CMPB x y)) 43085 for { 43086 _ = v.Args[1] 43087 x := v.Args[0] 43088 y := v.Args[1] 43089 v.reset(OpAMD64SETGE) 43090 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43091 v0.AddArg(x) 43092 v0.AddArg(y) 43093 v.AddArg(v0) 43094 return true 43095 } 43096 } 43097 func rewriteValueAMD64_OpGeq8U_0(v *Value) bool { 43098 b := v.Block 43099 _ = b 43100 // match: (Geq8U x y) 43101 // cond: 43102 // result: (SETAE (CMPB x y)) 43103 for { 43104 _ = v.Args[1] 43105 x := v.Args[0] 43106 y := v.Args[1] 43107 v.reset(OpAMD64SETAE) 43108 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43109 v0.AddArg(x) 43110 v0.AddArg(y) 43111 v.AddArg(v0) 43112 return true 43113 } 43114 } 43115 func 
rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool { 43116 // match: (GetCallerPC) 43117 // cond: 43118 // result: (LoweredGetCallerPC) 43119 for { 43120 v.reset(OpAMD64LoweredGetCallerPC) 43121 return true 43122 } 43123 } 43124 func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool { 43125 // match: (GetCallerSP) 43126 // cond: 43127 // result: (LoweredGetCallerSP) 43128 for { 43129 v.reset(OpAMD64LoweredGetCallerSP) 43130 return true 43131 } 43132 } 43133 func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool { 43134 // match: (GetClosurePtr) 43135 // cond: 43136 // result: (LoweredGetClosurePtr) 43137 for { 43138 v.reset(OpAMD64LoweredGetClosurePtr) 43139 return true 43140 } 43141 } 43142 func rewriteValueAMD64_OpGetG_0(v *Value) bool { 43143 // match: (GetG mem) 43144 // cond: 43145 // result: (LoweredGetG mem) 43146 for { 43147 mem := v.Args[0] 43148 v.reset(OpAMD64LoweredGetG) 43149 v.AddArg(mem) 43150 return true 43151 } 43152 } 43153 func rewriteValueAMD64_OpGreater16_0(v *Value) bool { 43154 b := v.Block 43155 _ = b 43156 // match: (Greater16 x y) 43157 // cond: 43158 // result: (SETG (CMPW x y)) 43159 for { 43160 _ = v.Args[1] 43161 x := v.Args[0] 43162 y := v.Args[1] 43163 v.reset(OpAMD64SETG) 43164 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43165 v0.AddArg(x) 43166 v0.AddArg(y) 43167 v.AddArg(v0) 43168 return true 43169 } 43170 } 43171 func rewriteValueAMD64_OpGreater16U_0(v *Value) bool { 43172 b := v.Block 43173 _ = b 43174 // match: (Greater16U x y) 43175 // cond: 43176 // result: (SETA (CMPW x y)) 43177 for { 43178 _ = v.Args[1] 43179 x := v.Args[0] 43180 y := v.Args[1] 43181 v.reset(OpAMD64SETA) 43182 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43183 v0.AddArg(x) 43184 v0.AddArg(y) 43185 v.AddArg(v0) 43186 return true 43187 } 43188 } 43189 func rewriteValueAMD64_OpGreater32_0(v *Value) bool { 43190 b := v.Block 43191 _ = b 43192 // match: (Greater32 x y) 43193 // cond: 43194 // result: (SETG (CMPL x y)) 43195 for { 43196 _ = v.Args[1] 43197 x := v.Args[0] 43198 y := v.Args[1] 43199 v.reset(OpAMD64SETG) 43200 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43201 v0.AddArg(x) 43202 v0.AddArg(y) 43203 v.AddArg(v0) 43204 return true 43205 } 43206 } 43207 func rewriteValueAMD64_OpGreater32F_0(v *Value) bool { 43208 b := v.Block 43209 _ = b 43210 // match: (Greater32F x y) 43211 // cond: 43212 // result: (SETGF (UCOMISS x y)) 43213 for { 43214 _ = v.Args[1] 43215 x := v.Args[0] 43216 y := v.Args[1] 43217 v.reset(OpAMD64SETGF) 43218 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 43219 v0.AddArg(x) 43220 v0.AddArg(y) 43221 v.AddArg(v0) 43222 return true 43223 } 43224 } 43225 func rewriteValueAMD64_OpGreater32U_0(v *Value) bool { 43226 b := v.Block 43227 _ = b 43228 // match: (Greater32U x y) 43229 // cond: 43230 // result: (SETA (CMPL x y)) 43231 for { 43232 _ = v.Args[1] 43233 x := v.Args[0] 43234 y := v.Args[1] 43235 v.reset(OpAMD64SETA) 43236 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43237 v0.AddArg(x) 43238 v0.AddArg(y) 43239 v.AddArg(v0) 43240 return true 43241 } 43242 } 43243 func rewriteValueAMD64_OpGreater64_0(v *Value) bool { 43244 b := v.Block 43245 _ = b 43246 // match: (Greater64 x y) 43247 // cond: 43248 // result: (SETG (CMPQ x y)) 43249 for { 43250 _ = v.Args[1] 43251 x := v.Args[0] 43252 y := v.Args[1] 43253 v.reset(OpAMD64SETG) 43254 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43255 v0.AddArg(x) 43256 v0.AddArg(y) 43257 v.AddArg(v0) 43258 return true 43259 } 43260 } 43261 func 
rewriteValueAMD64_OpGreater64F_0(v *Value) bool { 43262 b := v.Block 43263 _ = b 43264 // match: (Greater64F x y) 43265 // cond: 43266 // result: (SETGF (UCOMISD x y)) 43267 for { 43268 _ = v.Args[1] 43269 x := v.Args[0] 43270 y := v.Args[1] 43271 v.reset(OpAMD64SETGF) 43272 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 43273 v0.AddArg(x) 43274 v0.AddArg(y) 43275 v.AddArg(v0) 43276 return true 43277 } 43278 } 43279 func rewriteValueAMD64_OpGreater64U_0(v *Value) bool { 43280 b := v.Block 43281 _ = b 43282 // match: (Greater64U x y) 43283 // cond: 43284 // result: (SETA (CMPQ x y)) 43285 for { 43286 _ = v.Args[1] 43287 x := v.Args[0] 43288 y := v.Args[1] 43289 v.reset(OpAMD64SETA) 43290 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43291 v0.AddArg(x) 43292 v0.AddArg(y) 43293 v.AddArg(v0) 43294 return true 43295 } 43296 } 43297 func rewriteValueAMD64_OpGreater8_0(v *Value) bool { 43298 b := v.Block 43299 _ = b 43300 // match: (Greater8 x y) 43301 // cond: 43302 // result: (SETG (CMPB x y)) 43303 for { 43304 _ = v.Args[1] 43305 x := v.Args[0] 43306 y := v.Args[1] 43307 v.reset(OpAMD64SETG) 43308 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43309 v0.AddArg(x) 43310 v0.AddArg(y) 43311 v.AddArg(v0) 43312 return true 43313 } 43314 } 43315 func rewriteValueAMD64_OpGreater8U_0(v *Value) bool { 43316 b := v.Block 43317 _ = b 43318 // match: (Greater8U x y) 43319 // cond: 43320 // result: (SETA (CMPB x y)) 43321 for { 43322 _ = v.Args[1] 43323 x := v.Args[0] 43324 y := v.Args[1] 43325 v.reset(OpAMD64SETA) 43326 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43327 v0.AddArg(x) 43328 v0.AddArg(y) 43329 v.AddArg(v0) 43330 return true 43331 } 43332 } 43333 func rewriteValueAMD64_OpHmul32_0(v *Value) bool { 43334 // match: (Hmul32 x y) 43335 // cond: 43336 // result: (HMULL x y) 43337 for { 43338 _ = v.Args[1] 43339 x := v.Args[0] 43340 y := v.Args[1] 43341 v.reset(OpAMD64HMULL) 43342 v.AddArg(x) 43343 v.AddArg(y) 43344 return true 43345 } 43346 } 43347 func rewriteValueAMD64_OpHmul32u_0(v *Value) bool { 43348 // match: (Hmul32u x y) 43349 // cond: 43350 // result: (HMULLU x y) 43351 for { 43352 _ = v.Args[1] 43353 x := v.Args[0] 43354 y := v.Args[1] 43355 v.reset(OpAMD64HMULLU) 43356 v.AddArg(x) 43357 v.AddArg(y) 43358 return true 43359 } 43360 } 43361 func rewriteValueAMD64_OpHmul64_0(v *Value) bool { 43362 // match: (Hmul64 x y) 43363 // cond: 43364 // result: (HMULQ x y) 43365 for { 43366 _ = v.Args[1] 43367 x := v.Args[0] 43368 y := v.Args[1] 43369 v.reset(OpAMD64HMULQ) 43370 v.AddArg(x) 43371 v.AddArg(y) 43372 return true 43373 } 43374 } 43375 func rewriteValueAMD64_OpHmul64u_0(v *Value) bool { 43376 // match: (Hmul64u x y) 43377 // cond: 43378 // result: (HMULQU x y) 43379 for { 43380 _ = v.Args[1] 43381 x := v.Args[0] 43382 y := v.Args[1] 43383 v.reset(OpAMD64HMULQU) 43384 v.AddArg(x) 43385 v.AddArg(y) 43386 return true 43387 } 43388 } 43389 func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool { 43390 // match: (Int64Hi x) 43391 // cond: 43392 // result: (SHRQconst [32] x) 43393 for { 43394 x := v.Args[0] 43395 v.reset(OpAMD64SHRQconst) 43396 v.AuxInt = 32 43397 v.AddArg(x) 43398 return true 43399 } 43400 } 43401 func rewriteValueAMD64_OpInterCall_0(v *Value) bool { 43402 // match: (InterCall [argwid] entry mem) 43403 // cond: 43404 // result: (CALLinter [argwid] entry mem) 43405 for { 43406 argwid := v.AuxInt 43407 _ = v.Args[1] 43408 entry := v.Args[0] 43409 mem := v.Args[1] 43410 v.reset(OpAMD64CALLinter) 43411 v.AuxInt = argwid 43412 v.AddArg(entry) 43413 
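// Hmul* above keeps only the high half of the double-width product — on
// amd64 the one-operand IMUL/MUL leaves exactly that in the high result
// register, which HMULL/HMULQ expose — and Int64Hi is a plain 32-bit right
// shift. The Hmul32 contract, sketched in ordinary Go:
//
//	func hmul32(x, y int32) int32 {
//		return int32((int64(x) * int64(y)) >> 32) // high 32 bits of the product
//	}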
v.AddArg(mem) 43414 return true 43415 } 43416 } 43417 func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool { 43418 b := v.Block 43419 _ = b 43420 config := b.Func.Config 43421 _ = config 43422 // match: (IsInBounds idx len) 43423 // cond: config.PtrSize == 8 43424 // result: (SETB (CMPQ idx len)) 43425 for { 43426 _ = v.Args[1] 43427 idx := v.Args[0] 43428 len := v.Args[1] 43429 if !(config.PtrSize == 8) { 43430 break 43431 } 43432 v.reset(OpAMD64SETB) 43433 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43434 v0.AddArg(idx) 43435 v0.AddArg(len) 43436 v.AddArg(v0) 43437 return true 43438 } 43439 // match: (IsInBounds idx len) 43440 // cond: config.PtrSize == 4 43441 // result: (SETB (CMPL idx len)) 43442 for { 43443 _ = v.Args[1] 43444 idx := v.Args[0] 43445 len := v.Args[1] 43446 if !(config.PtrSize == 4) { 43447 break 43448 } 43449 v.reset(OpAMD64SETB) 43450 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43451 v0.AddArg(idx) 43452 v0.AddArg(len) 43453 v.AddArg(v0) 43454 return true 43455 } 43456 return false 43457 } 43458 func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool { 43459 b := v.Block 43460 _ = b 43461 config := b.Func.Config 43462 _ = config 43463 // match: (IsNonNil p) 43464 // cond: config.PtrSize == 8 43465 // result: (SETNE (TESTQ p p)) 43466 for { 43467 p := v.Args[0] 43468 if !(config.PtrSize == 8) { 43469 break 43470 } 43471 v.reset(OpAMD64SETNE) 43472 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) 43473 v0.AddArg(p) 43474 v0.AddArg(p) 43475 v.AddArg(v0) 43476 return true 43477 } 43478 // match: (IsNonNil p) 43479 // cond: config.PtrSize == 4 43480 // result: (SETNE (TESTL p p)) 43481 for { 43482 p := v.Args[0] 43483 if !(config.PtrSize == 4) { 43484 break 43485 } 43486 v.reset(OpAMD64SETNE) 43487 v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags) 43488 v0.AddArg(p) 43489 v0.AddArg(p) 43490 v.AddArg(v0) 43491 return true 43492 } 43493 return false 43494 } 43495 func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool { 43496 b := v.Block 43497 _ = b 43498 config := b.Func.Config 43499 _ = config 43500 // match: (IsSliceInBounds idx len) 43501 // cond: config.PtrSize == 8 43502 // result: (SETBE (CMPQ idx len)) 43503 for { 43504 _ = v.Args[1] 43505 idx := v.Args[0] 43506 len := v.Args[1] 43507 if !(config.PtrSize == 8) { 43508 break 43509 } 43510 v.reset(OpAMD64SETBE) 43511 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43512 v0.AddArg(idx) 43513 v0.AddArg(len) 43514 v.AddArg(v0) 43515 return true 43516 } 43517 // match: (IsSliceInBounds idx len) 43518 // cond: config.PtrSize == 4 43519 // result: (SETBE (CMPL idx len)) 43520 for { 43521 _ = v.Args[1] 43522 idx := v.Args[0] 43523 len := v.Args[1] 43524 if !(config.PtrSize == 4) { 43525 break 43526 } 43527 v.reset(OpAMD64SETBE) 43528 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43529 v0.AddArg(idx) 43530 v0.AddArg(len) 43531 v.AddArg(v0) 43532 return true 43533 } 43534 return false 43535 } 43536 func rewriteValueAMD64_OpLeq16_0(v *Value) bool { 43537 b := v.Block 43538 _ = b 43539 // match: (Leq16 x y) 43540 // cond: 43541 // result: (SETLE (CMPW x y)) 43542 for { 43543 _ = v.Args[1] 43544 x := v.Args[0] 43545 y := v.Args[1] 43546 v.reset(OpAMD64SETLE) 43547 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43548 v0.AddArg(x) 43549 v0.AddArg(y) 43550 v.AddArg(v0) 43551 return true 43552 } 43553 } 43554 func rewriteValueAMD64_OpLeq16U_0(v *Value) bool { 43555 b := v.Block 43556 _ = b 43557 // match: (Leq16U x y) 43558 // cond: 43559 // result: (SETBE (CMPW x y)) 43560 for 
{ 43561 _ = v.Args[1] 43562 x := v.Args[0] 43563 y := v.Args[1] 43564 v.reset(OpAMD64SETBE) 43565 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43566 v0.AddArg(x) 43567 v0.AddArg(y) 43568 v.AddArg(v0) 43569 return true 43570 } 43571 } 43572 func rewriteValueAMD64_OpLeq32_0(v *Value) bool { 43573 b := v.Block 43574 _ = b 43575 // match: (Leq32 x y) 43576 // cond: 43577 // result: (SETLE (CMPL x y)) 43578 for { 43579 _ = v.Args[1] 43580 x := v.Args[0] 43581 y := v.Args[1] 43582 v.reset(OpAMD64SETLE) 43583 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43584 v0.AddArg(x) 43585 v0.AddArg(y) 43586 v.AddArg(v0) 43587 return true 43588 } 43589 } 43590 func rewriteValueAMD64_OpLeq32F_0(v *Value) bool { 43591 b := v.Block 43592 _ = b 43593 // match: (Leq32F x y) 43594 // cond: 43595 // result: (SETGEF (UCOMISS y x)) 43596 for { 43597 _ = v.Args[1] 43598 x := v.Args[0] 43599 y := v.Args[1] 43600 v.reset(OpAMD64SETGEF) 43601 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 43602 v0.AddArg(y) 43603 v0.AddArg(x) 43604 v.AddArg(v0) 43605 return true 43606 } 43607 } 43608 func rewriteValueAMD64_OpLeq32U_0(v *Value) bool { 43609 b := v.Block 43610 _ = b 43611 // match: (Leq32U x y) 43612 // cond: 43613 // result: (SETBE (CMPL x y)) 43614 for { 43615 _ = v.Args[1] 43616 x := v.Args[0] 43617 y := v.Args[1] 43618 v.reset(OpAMD64SETBE) 43619 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43620 v0.AddArg(x) 43621 v0.AddArg(y) 43622 v.AddArg(v0) 43623 return true 43624 } 43625 } 43626 func rewriteValueAMD64_OpLeq64_0(v *Value) bool { 43627 b := v.Block 43628 _ = b 43629 // match: (Leq64 x y) 43630 // cond: 43631 // result: (SETLE (CMPQ x y)) 43632 for { 43633 _ = v.Args[1] 43634 x := v.Args[0] 43635 y := v.Args[1] 43636 v.reset(OpAMD64SETLE) 43637 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43638 v0.AddArg(x) 43639 v0.AddArg(y) 43640 v.AddArg(v0) 43641 return true 43642 } 43643 } 43644 func rewriteValueAMD64_OpLeq64F_0(v *Value) bool { 43645 b := v.Block 43646 _ = b 43647 // match: (Leq64F x y) 43648 // cond: 43649 // result: (SETGEF (UCOMISD y x)) 43650 for { 43651 _ = v.Args[1] 43652 x := v.Args[0] 43653 y := v.Args[1] 43654 v.reset(OpAMD64SETGEF) 43655 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 43656 v0.AddArg(y) 43657 v0.AddArg(x) 43658 v.AddArg(v0) 43659 return true 43660 } 43661 } 43662 func rewriteValueAMD64_OpLeq64U_0(v *Value) bool { 43663 b := v.Block 43664 _ = b 43665 // match: (Leq64U x y) 43666 // cond: 43667 // result: (SETBE (CMPQ x y)) 43668 for { 43669 _ = v.Args[1] 43670 x := v.Args[0] 43671 y := v.Args[1] 43672 v.reset(OpAMD64SETBE) 43673 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43674 v0.AddArg(x) 43675 v0.AddArg(y) 43676 v.AddArg(v0) 43677 return true 43678 } 43679 } 43680 func rewriteValueAMD64_OpLeq8_0(v *Value) bool { 43681 b := v.Block 43682 _ = b 43683 // match: (Leq8 x y) 43684 // cond: 43685 // result: (SETLE (CMPB x y)) 43686 for { 43687 _ = v.Args[1] 43688 x := v.Args[0] 43689 y := v.Args[1] 43690 v.reset(OpAMD64SETLE) 43691 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43692 v0.AddArg(x) 43693 v0.AddArg(y) 43694 v.AddArg(v0) 43695 return true 43696 } 43697 } 43698 func rewriteValueAMD64_OpLeq8U_0(v *Value) bool { 43699 b := v.Block 43700 _ = b 43701 // match: (Leq8U x y) 43702 // cond: 43703 // result: (SETBE (CMPB x y)) 43704 for { 43705 _ = v.Args[1] 43706 x := v.Args[0] 43707 y := v.Args[1] 43708 v.reset(OpAMD64SETBE) 43709 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43710 v0.AddArg(x) 
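// The floating-point orderings swap their operands: Leq32F x y is built as
// SETGEF (UCOMISS y x), computing x <= y as y >= x. Only the "above"-family
// conditions (A/AE, reached here via SETGF/SETGEF) come out false on
// UCOMIS*'s unordered result, so every float comparison is normalized to >
// or >= and NaN compares false, as Go requires. The identity relied on:
//
//	func leq64F(x, y float64) bool {
//		return y >= x // same truth table as x <= y; false when either is NaN
//	}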
43711 v0.AddArg(y) 43712 v.AddArg(v0) 43713 return true 43714 } 43715 } 43716 func rewriteValueAMD64_OpLess16_0(v *Value) bool { 43717 b := v.Block 43718 _ = b 43719 // match: (Less16 x y) 43720 // cond: 43721 // result: (SETL (CMPW x y)) 43722 for { 43723 _ = v.Args[1] 43724 x := v.Args[0] 43725 y := v.Args[1] 43726 v.reset(OpAMD64SETL) 43727 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43728 v0.AddArg(x) 43729 v0.AddArg(y) 43730 v.AddArg(v0) 43731 return true 43732 } 43733 } 43734 func rewriteValueAMD64_OpLess16U_0(v *Value) bool { 43735 b := v.Block 43736 _ = b 43737 // match: (Less16U x y) 43738 // cond: 43739 // result: (SETB (CMPW x y)) 43740 for { 43741 _ = v.Args[1] 43742 x := v.Args[0] 43743 y := v.Args[1] 43744 v.reset(OpAMD64SETB) 43745 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) 43746 v0.AddArg(x) 43747 v0.AddArg(y) 43748 v.AddArg(v0) 43749 return true 43750 } 43751 } 43752 func rewriteValueAMD64_OpLess32_0(v *Value) bool { 43753 b := v.Block 43754 _ = b 43755 // match: (Less32 x y) 43756 // cond: 43757 // result: (SETL (CMPL x y)) 43758 for { 43759 _ = v.Args[1] 43760 x := v.Args[0] 43761 y := v.Args[1] 43762 v.reset(OpAMD64SETL) 43763 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43764 v0.AddArg(x) 43765 v0.AddArg(y) 43766 v.AddArg(v0) 43767 return true 43768 } 43769 } 43770 func rewriteValueAMD64_OpLess32F_0(v *Value) bool { 43771 b := v.Block 43772 _ = b 43773 // match: (Less32F x y) 43774 // cond: 43775 // result: (SETGF (UCOMISS y x)) 43776 for { 43777 _ = v.Args[1] 43778 x := v.Args[0] 43779 y := v.Args[1] 43780 v.reset(OpAMD64SETGF) 43781 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) 43782 v0.AddArg(y) 43783 v0.AddArg(x) 43784 v.AddArg(v0) 43785 return true 43786 } 43787 } 43788 func rewriteValueAMD64_OpLess32U_0(v *Value) bool { 43789 b := v.Block 43790 _ = b 43791 // match: (Less32U x y) 43792 // cond: 43793 // result: (SETB (CMPL x y)) 43794 for { 43795 _ = v.Args[1] 43796 x := v.Args[0] 43797 y := v.Args[1] 43798 v.reset(OpAMD64SETB) 43799 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) 43800 v0.AddArg(x) 43801 v0.AddArg(y) 43802 v.AddArg(v0) 43803 return true 43804 } 43805 } 43806 func rewriteValueAMD64_OpLess64_0(v *Value) bool { 43807 b := v.Block 43808 _ = b 43809 // match: (Less64 x y) 43810 // cond: 43811 // result: (SETL (CMPQ x y)) 43812 for { 43813 _ = v.Args[1] 43814 x := v.Args[0] 43815 y := v.Args[1] 43816 v.reset(OpAMD64SETL) 43817 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43818 v0.AddArg(x) 43819 v0.AddArg(y) 43820 v.AddArg(v0) 43821 return true 43822 } 43823 } 43824 func rewriteValueAMD64_OpLess64F_0(v *Value) bool { 43825 b := v.Block 43826 _ = b 43827 // match: (Less64F x y) 43828 // cond: 43829 // result: (SETGF (UCOMISD y x)) 43830 for { 43831 _ = v.Args[1] 43832 x := v.Args[0] 43833 y := v.Args[1] 43834 v.reset(OpAMD64SETGF) 43835 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) 43836 v0.AddArg(y) 43837 v0.AddArg(x) 43838 v.AddArg(v0) 43839 return true 43840 } 43841 } 43842 func rewriteValueAMD64_OpLess64U_0(v *Value) bool { 43843 b := v.Block 43844 _ = b 43845 // match: (Less64U x y) 43846 // cond: 43847 // result: (SETB (CMPQ x y)) 43848 for { 43849 _ = v.Args[1] 43850 x := v.Args[0] 43851 y := v.Args[1] 43852 v.reset(OpAMD64SETB) 43853 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) 43854 v0.AddArg(x) 43855 v0.AddArg(y) 43856 v.AddArg(v0) 43857 return true 43858 } 43859 } 43860 func rewriteValueAMD64_OpLess8_0(v *Value) bool { 43861 b := v.Block 43862 _ = b 43863 // 
match: (Less8 x y) 43864 // cond: 43865 // result: (SETL (CMPB x y)) 43866 for { 43867 _ = v.Args[1] 43868 x := v.Args[0] 43869 y := v.Args[1] 43870 v.reset(OpAMD64SETL) 43871 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43872 v0.AddArg(x) 43873 v0.AddArg(y) 43874 v.AddArg(v0) 43875 return true 43876 } 43877 } 43878 func rewriteValueAMD64_OpLess8U_0(v *Value) bool { 43879 b := v.Block 43880 _ = b 43881 // match: (Less8U x y) 43882 // cond: 43883 // result: (SETB (CMPB x y)) 43884 for { 43885 _ = v.Args[1] 43886 x := v.Args[0] 43887 y := v.Args[1] 43888 v.reset(OpAMD64SETB) 43889 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) 43890 v0.AddArg(x) 43891 v0.AddArg(y) 43892 v.AddArg(v0) 43893 return true 43894 } 43895 } 43896 func rewriteValueAMD64_OpLoad_0(v *Value) bool { 43897 b := v.Block 43898 _ = b 43899 config := b.Func.Config 43900 _ = config 43901 // match: (Load <t> ptr mem) 43902 // cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8) 43903 // result: (MOVQload ptr mem) 43904 for { 43905 t := v.Type 43906 _ = v.Args[1] 43907 ptr := v.Args[0] 43908 mem := v.Args[1] 43909 if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) { 43910 break 43911 } 43912 v.reset(OpAMD64MOVQload) 43913 v.AddArg(ptr) 43914 v.AddArg(mem) 43915 return true 43916 } 43917 // match: (Load <t> ptr mem) 43918 // cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4) 43919 // result: (MOVLload ptr mem) 43920 for { 43921 t := v.Type 43922 _ = v.Args[1] 43923 ptr := v.Args[0] 43924 mem := v.Args[1] 43925 if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) { 43926 break 43927 } 43928 v.reset(OpAMD64MOVLload) 43929 v.AddArg(ptr) 43930 v.AddArg(mem) 43931 return true 43932 } 43933 // match: (Load <t> ptr mem) 43934 // cond: is16BitInt(t) 43935 // result: (MOVWload ptr mem) 43936 for { 43937 t := v.Type 43938 _ = v.Args[1] 43939 ptr := v.Args[0] 43940 mem := v.Args[1] 43941 if !(is16BitInt(t)) { 43942 break 43943 } 43944 v.reset(OpAMD64MOVWload) 43945 v.AddArg(ptr) 43946 v.AddArg(mem) 43947 return true 43948 } 43949 // match: (Load <t> ptr mem) 43950 // cond: (t.IsBoolean() || is8BitInt(t)) 43951 // result: (MOVBload ptr mem) 43952 for { 43953 t := v.Type 43954 _ = v.Args[1] 43955 ptr := v.Args[0] 43956 mem := v.Args[1] 43957 if !(t.IsBoolean() || is8BitInt(t)) { 43958 break 43959 } 43960 v.reset(OpAMD64MOVBload) 43961 v.AddArg(ptr) 43962 v.AddArg(mem) 43963 return true 43964 } 43965 // match: (Load <t> ptr mem) 43966 // cond: is32BitFloat(t) 43967 // result: (MOVSSload ptr mem) 43968 for { 43969 t := v.Type 43970 _ = v.Args[1] 43971 ptr := v.Args[0] 43972 mem := v.Args[1] 43973 if !(is32BitFloat(t)) { 43974 break 43975 } 43976 v.reset(OpAMD64MOVSSload) 43977 v.AddArg(ptr) 43978 v.AddArg(mem) 43979 return true 43980 } 43981 // match: (Load <t> ptr mem) 43982 // cond: is64BitFloat(t) 43983 // result: (MOVSDload ptr mem) 43984 for { 43985 t := v.Type 43986 _ = v.Args[1] 43987 ptr := v.Args[0] 43988 mem := v.Args[1] 43989 if !(is64BitFloat(t)) { 43990 break 43991 } 43992 v.reset(OpAMD64MOVSDload) 43993 v.AddArg(ptr) 43994 v.AddArg(mem) 43995 return true 43996 } 43997 return false 43998 } 43999 func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool { 44000 b := v.Block 44001 _ = b 44002 // match: (Lsh16x16 <t> x y) 44003 // cond: 44004 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 44005 for { 44006 t := v.Type 44007 _ = v.Args[1] 44008 x := v.Args[0] 44009 y := v.Args[1] 44010 v.reset(OpAMD64ANDL) 44011 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44012 v0.AddArg(x) 44013 
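// The Load rules just above dispatch purely on the value's type, with
// pointer width taken from config.PtrSize:
//
//	64-bit ints, 8-byte pointers → MOVQload
//	32-bit ints, 4-byte pointers → MOVLload
//	16-bit ints                  → MOVWload
//	bools and 8-bit ints         → MOVBload
//	float32 / float64            → MOVSSload / MOVSDload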
v0.AddArg(y) 44014 v.AddArg(v0) 44015 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44016 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 44017 v2.AuxInt = 32 44018 v2.AddArg(y) 44019 v1.AddArg(v2) 44020 v.AddArg(v1) 44021 return true 44022 } 44023 } 44024 func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool { 44025 b := v.Block 44026 _ = b 44027 // match: (Lsh16x32 <t> x y) 44028 // cond: 44029 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 44030 for { 44031 t := v.Type 44032 _ = v.Args[1] 44033 x := v.Args[0] 44034 y := v.Args[1] 44035 v.reset(OpAMD64ANDL) 44036 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44037 v0.AddArg(x) 44038 v0.AddArg(y) 44039 v.AddArg(v0) 44040 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44041 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 44042 v2.AuxInt = 32 44043 v2.AddArg(y) 44044 v1.AddArg(v2) 44045 v.AddArg(v1) 44046 return true 44047 } 44048 } 44049 func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool { 44050 b := v.Block 44051 _ = b 44052 // match: (Lsh16x64 <t> x y) 44053 // cond: 44054 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 44055 for { 44056 t := v.Type 44057 _ = v.Args[1] 44058 x := v.Args[0] 44059 y := v.Args[1] 44060 v.reset(OpAMD64ANDL) 44061 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44062 v0.AddArg(x) 44063 v0.AddArg(y) 44064 v.AddArg(v0) 44065 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44066 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44067 v2.AuxInt = 32 44068 v2.AddArg(y) 44069 v1.AddArg(v2) 44070 v.AddArg(v1) 44071 return true 44072 } 44073 } 44074 func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool { 44075 b := v.Block 44076 _ = b 44077 // match: (Lsh16x8 <t> x y) 44078 // cond: 44079 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 44080 for { 44081 t := v.Type 44082 _ = v.Args[1] 44083 x := v.Args[0] 44084 y := v.Args[1] 44085 v.reset(OpAMD64ANDL) 44086 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44087 v0.AddArg(x) 44088 v0.AddArg(y) 44089 v.AddArg(v0) 44090 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44091 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 44092 v2.AuxInt = 32 44093 v2.AddArg(y) 44094 v1.AddArg(v2) 44095 v.AddArg(v1) 44096 return true 44097 } 44098 } 44099 func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool { 44100 b := v.Block 44101 _ = b 44102 // match: (Lsh32x16 <t> x y) 44103 // cond: 44104 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 44105 for { 44106 t := v.Type 44107 _ = v.Args[1] 44108 x := v.Args[0] 44109 y := v.Args[1] 44110 v.reset(OpAMD64ANDL) 44111 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44112 v0.AddArg(x) 44113 v0.AddArg(y) 44114 v.AddArg(v0) 44115 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44116 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 44117 v2.AuxInt = 32 44118 v2.AddArg(y) 44119 v1.AddArg(v2) 44120 v.AddArg(v1) 44121 return true 44122 } 44123 } 44124 func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool { 44125 b := v.Block 44126 _ = b 44127 // match: (Lsh32x32 <t> x y) 44128 // cond: 44129 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 44130 for { 44131 t := v.Type 44132 _ = v.Args[1] 44133 x := v.Args[0] 44134 y := v.Args[1] 44135 v.reset(OpAMD64ANDL) 44136 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44137 v0.AddArg(x) 44138 v0.AddArg(y) 44139 v.AddArg(v0) 44140 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44141 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 44142 v2.AuxInt = 32 
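// The Lsh lowerings all share one idiom: the hardware shift (SHLL/SHLQ)
// masks its count to the operand width, but Go defines x << s as 0 once s
// reaches the width, so the shifted value is ANDed with a mask that
// SBBLcarrymask materializes from the carry of CMPxconst y [width]: all ones
// when y < width (unsigned), zero otherwise. The 8- and 16-bit shifts also
// compare against 32 because they are performed in 32-bit registers, where
// the extra high bits are dead in the narrower result. The same computation
// in plain Go, for the 32-bit case:
//
//	func lsh32(x, s uint32) uint32 {
//		shifted := x << (s & 31) // what SHLL actually computes
//		var mask uint32          // SBBLcarrymask(CMPLconst s [32])
//		if s < 32 {
//			mask = ^uint32(0)
//		}
//		return shifted & mask // 0 whenever s >= 32, as Go requires
//	}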
44143 v2.AddArg(y) 44144 v1.AddArg(v2) 44145 v.AddArg(v1) 44146 return true 44147 } 44148 } 44149 func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool { 44150 b := v.Block 44151 _ = b 44152 // match: (Lsh32x64 <t> x y) 44153 // cond: 44154 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 44155 for { 44156 t := v.Type 44157 _ = v.Args[1] 44158 x := v.Args[0] 44159 y := v.Args[1] 44160 v.reset(OpAMD64ANDL) 44161 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44162 v0.AddArg(x) 44163 v0.AddArg(y) 44164 v.AddArg(v0) 44165 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44166 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44167 v2.AuxInt = 32 44168 v2.AddArg(y) 44169 v1.AddArg(v2) 44170 v.AddArg(v1) 44171 return true 44172 } 44173 } 44174 func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool { 44175 b := v.Block 44176 _ = b 44177 // match: (Lsh32x8 <t> x y) 44178 // cond: 44179 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 44180 for { 44181 t := v.Type 44182 _ = v.Args[1] 44183 x := v.Args[0] 44184 y := v.Args[1] 44185 v.reset(OpAMD64ANDL) 44186 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44187 v0.AddArg(x) 44188 v0.AddArg(y) 44189 v.AddArg(v0) 44190 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44191 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 44192 v2.AuxInt = 32 44193 v2.AddArg(y) 44194 v1.AddArg(v2) 44195 v.AddArg(v1) 44196 return true 44197 } 44198 } 44199 func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool { 44200 b := v.Block 44201 _ = b 44202 // match: (Lsh64x16 <t> x y) 44203 // cond: 44204 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) 44205 for { 44206 t := v.Type 44207 _ = v.Args[1] 44208 x := v.Args[0] 44209 y := v.Args[1] 44210 v.reset(OpAMD64ANDQ) 44211 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 44212 v0.AddArg(x) 44213 v0.AddArg(y) 44214 v.AddArg(v0) 44215 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 44216 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 44217 v2.AuxInt = 64 44218 v2.AddArg(y) 44219 v1.AddArg(v2) 44220 v.AddArg(v1) 44221 return true 44222 } 44223 } 44224 func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool { 44225 b := v.Block 44226 _ = b 44227 // match: (Lsh64x32 <t> x y) 44228 // cond: 44229 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) 44230 for { 44231 t := v.Type 44232 _ = v.Args[1] 44233 x := v.Args[0] 44234 y := v.Args[1] 44235 v.reset(OpAMD64ANDQ) 44236 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 44237 v0.AddArg(x) 44238 v0.AddArg(y) 44239 v.AddArg(v0) 44240 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 44241 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 44242 v2.AuxInt = 64 44243 v2.AddArg(y) 44244 v1.AddArg(v2) 44245 v.AddArg(v1) 44246 return true 44247 } 44248 } 44249 func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool { 44250 b := v.Block 44251 _ = b 44252 // match: (Lsh64x64 <t> x y) 44253 // cond: 44254 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) 44255 for { 44256 t := v.Type 44257 _ = v.Args[1] 44258 x := v.Args[0] 44259 y := v.Args[1] 44260 v.reset(OpAMD64ANDQ) 44261 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 44262 v0.AddArg(x) 44263 v0.AddArg(y) 44264 v.AddArg(v0) 44265 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 44266 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44267 v2.AuxInt = 64 44268 v2.AddArg(y) 44269 v1.AddArg(v2) 44270 v.AddArg(v1) 44271 return true 44272 } 44273 } 44274 func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool { 44275 b := v.Block 44276 _ 
= b 44277 // match: (Lsh64x8 <t> x y) 44278 // cond: 44279 // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) 44280 for { 44281 t := v.Type 44282 _ = v.Args[1] 44283 x := v.Args[0] 44284 y := v.Args[1] 44285 v.reset(OpAMD64ANDQ) 44286 v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) 44287 v0.AddArg(x) 44288 v0.AddArg(y) 44289 v.AddArg(v0) 44290 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) 44291 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 44292 v2.AuxInt = 64 44293 v2.AddArg(y) 44294 v1.AddArg(v2) 44295 v.AddArg(v1) 44296 return true 44297 } 44298 } 44299 func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool { 44300 b := v.Block 44301 _ = b 44302 // match: (Lsh8x16 <t> x y) 44303 // cond: 44304 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) 44305 for { 44306 t := v.Type 44307 _ = v.Args[1] 44308 x := v.Args[0] 44309 y := v.Args[1] 44310 v.reset(OpAMD64ANDL) 44311 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44312 v0.AddArg(x) 44313 v0.AddArg(y) 44314 v.AddArg(v0) 44315 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44316 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) 44317 v2.AuxInt = 32 44318 v2.AddArg(y) 44319 v1.AddArg(v2) 44320 v.AddArg(v1) 44321 return true 44322 } 44323 } 44324 func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool { 44325 b := v.Block 44326 _ = b 44327 // match: (Lsh8x32 <t> x y) 44328 // cond: 44329 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) 44330 for { 44331 t := v.Type 44332 _ = v.Args[1] 44333 x := v.Args[0] 44334 y := v.Args[1] 44335 v.reset(OpAMD64ANDL) 44336 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44337 v0.AddArg(x) 44338 v0.AddArg(y) 44339 v.AddArg(v0) 44340 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44341 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) 44342 v2.AuxInt = 32 44343 v2.AddArg(y) 44344 v1.AddArg(v2) 44345 v.AddArg(v1) 44346 return true 44347 } 44348 } 44349 func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool { 44350 b := v.Block 44351 _ = b 44352 // match: (Lsh8x64 <t> x y) 44353 // cond: 44354 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) 44355 for { 44356 t := v.Type 44357 _ = v.Args[1] 44358 x := v.Args[0] 44359 y := v.Args[1] 44360 v.reset(OpAMD64ANDL) 44361 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44362 v0.AddArg(x) 44363 v0.AddArg(y) 44364 v.AddArg(v0) 44365 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44366 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) 44367 v2.AuxInt = 32 44368 v2.AddArg(y) 44369 v1.AddArg(v2) 44370 v.AddArg(v1) 44371 return true 44372 } 44373 } 44374 func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool { 44375 b := v.Block 44376 _ = b 44377 // match: (Lsh8x8 <t> x y) 44378 // cond: 44379 // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) 44380 for { 44381 t := v.Type 44382 _ = v.Args[1] 44383 x := v.Args[0] 44384 y := v.Args[1] 44385 v.reset(OpAMD64ANDL) 44386 v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) 44387 v0.AddArg(x) 44388 v0.AddArg(y) 44389 v.AddArg(v0) 44390 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) 44391 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) 44392 v2.AuxInt = 32 44393 v2.AddArg(y) 44394 v1.AddArg(v2) 44395 v.AddArg(v1) 44396 return true 44397 } 44398 } 44399 func rewriteValueAMD64_OpMod16_0(v *Value) bool { 44400 b := v.Block 44401 _ = b 44402 typ := &b.Func.Config.Types 44403 _ = typ 44404 // match: (Mod16 x y) 44405 // cond: 44406 // result: (Select1 (DIVW x y)) 44407 for { 44408 _ = v.Args[1] 44409 x := v.Args[0] 
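// Mod* here reuses the very same tuple-producing DIVW/DIVL/DIVQ(U) ops as
// the Div* rules earlier: Select0 extracts the quotient and Select1 the
// remainder, so a paired x/y and x%y can share a single machine divide. The
// 8-bit forms widen both operands to 16 bits first. In ordinary Go:
//
//	func divMod8(x, y int8) (q, r int8) {
//		q = int8(int16(x) / int16(y)) // Select0 of (DIVW (SignExt8to16 x) (SignExt8to16 y))
//		r = int8(int16(x) % int16(y)) // Select1 of the same tuple
//		return
//	}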
44410 y := v.Args[1] 44411 v.reset(OpSelect1) 44412 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 44413 v0.AddArg(x) 44414 v0.AddArg(y) 44415 v.AddArg(v0) 44416 return true 44417 } 44418 } 44419 func rewriteValueAMD64_OpMod16u_0(v *Value) bool { 44420 b := v.Block 44421 _ = b 44422 typ := &b.Func.Config.Types 44423 _ = typ 44424 // match: (Mod16u x y) 44425 // cond: 44426 // result: (Select1 (DIVWU x y)) 44427 for { 44428 _ = v.Args[1] 44429 x := v.Args[0] 44430 y := v.Args[1] 44431 v.reset(OpSelect1) 44432 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 44433 v0.AddArg(x) 44434 v0.AddArg(y) 44435 v.AddArg(v0) 44436 return true 44437 } 44438 } 44439 func rewriteValueAMD64_OpMod32_0(v *Value) bool { 44440 b := v.Block 44441 _ = b 44442 typ := &b.Func.Config.Types 44443 _ = typ 44444 // match: (Mod32 x y) 44445 // cond: 44446 // result: (Select1 (DIVL x y)) 44447 for { 44448 _ = v.Args[1] 44449 x := v.Args[0] 44450 y := v.Args[1] 44451 v.reset(OpSelect1) 44452 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) 44453 v0.AddArg(x) 44454 v0.AddArg(y) 44455 v.AddArg(v0) 44456 return true 44457 } 44458 } 44459 func rewriteValueAMD64_OpMod32u_0(v *Value) bool { 44460 b := v.Block 44461 _ = b 44462 typ := &b.Func.Config.Types 44463 _ = typ 44464 // match: (Mod32u x y) 44465 // cond: 44466 // result: (Select1 (DIVLU x y)) 44467 for { 44468 _ = v.Args[1] 44469 x := v.Args[0] 44470 y := v.Args[1] 44471 v.reset(OpSelect1) 44472 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) 44473 v0.AddArg(x) 44474 v0.AddArg(y) 44475 v.AddArg(v0) 44476 return true 44477 } 44478 } 44479 func rewriteValueAMD64_OpMod64_0(v *Value) bool { 44480 b := v.Block 44481 _ = b 44482 typ := &b.Func.Config.Types 44483 _ = typ 44484 // match: (Mod64 x y) 44485 // cond: 44486 // result: (Select1 (DIVQ x y)) 44487 for { 44488 _ = v.Args[1] 44489 x := v.Args[0] 44490 y := v.Args[1] 44491 v.reset(OpSelect1) 44492 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) 44493 v0.AddArg(x) 44494 v0.AddArg(y) 44495 v.AddArg(v0) 44496 return true 44497 } 44498 } 44499 func rewriteValueAMD64_OpMod64u_0(v *Value) bool { 44500 b := v.Block 44501 _ = b 44502 typ := &b.Func.Config.Types 44503 _ = typ 44504 // match: (Mod64u x y) 44505 // cond: 44506 // result: (Select1 (DIVQU x y)) 44507 for { 44508 _ = v.Args[1] 44509 x := v.Args[0] 44510 y := v.Args[1] 44511 v.reset(OpSelect1) 44512 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) 44513 v0.AddArg(x) 44514 v0.AddArg(y) 44515 v.AddArg(v0) 44516 return true 44517 } 44518 } 44519 func rewriteValueAMD64_OpMod8_0(v *Value) bool { 44520 b := v.Block 44521 _ = b 44522 typ := &b.Func.Config.Types 44523 _ = typ 44524 // match: (Mod8 x y) 44525 // cond: 44526 // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) 44527 for { 44528 _ = v.Args[1] 44529 x := v.Args[0] 44530 y := v.Args[1] 44531 v.reset(OpSelect1) 44532 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) 44533 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 44534 v1.AddArg(x) 44535 v0.AddArg(v1) 44536 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) 44537 v2.AddArg(y) 44538 v0.AddArg(v2) 44539 v.AddArg(v0) 44540 return true 44541 } 44542 } 44543 func rewriteValueAMD64_OpMod8u_0(v *Value) bool { 44544 b := v.Block 44545 _ = b 44546 typ := &b.Func.Config.Types 44547 _ = typ 44548 // match: (Mod8u x y) 44549 // cond: 44550 // result: (Select1 
(DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) 44551 for { 44552 _ = v.Args[1] 44553 x := v.Args[0] 44554 y := v.Args[1] 44555 v.reset(OpSelect1) 44556 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) 44557 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 44558 v1.AddArg(x) 44559 v0.AddArg(v1) 44560 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) 44561 v2.AddArg(y) 44562 v0.AddArg(v2) 44563 v.AddArg(v0) 44564 return true 44565 } 44566 } 44567 func rewriteValueAMD64_OpMove_0(v *Value) bool { 44568 b := v.Block 44569 _ = b 44570 config := b.Func.Config 44571 _ = config 44572 typ := &b.Func.Config.Types 44573 _ = typ 44574 // match: (Move [0] _ _ mem) 44575 // cond: 44576 // result: mem 44577 for { 44578 if v.AuxInt != 0 { 44579 break 44580 } 44581 _ = v.Args[2] 44582 mem := v.Args[2] 44583 v.reset(OpCopy) 44584 v.Type = mem.Type 44585 v.AddArg(mem) 44586 return true 44587 } 44588 // match: (Move [1] dst src mem) 44589 // cond: 44590 // result: (MOVBstore dst (MOVBload src mem) mem) 44591 for { 44592 if v.AuxInt != 1 { 44593 break 44594 } 44595 _ = v.Args[2] 44596 dst := v.Args[0] 44597 src := v.Args[1] 44598 mem := v.Args[2] 44599 v.reset(OpAMD64MOVBstore) 44600 v.AddArg(dst) 44601 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 44602 v0.AddArg(src) 44603 v0.AddArg(mem) 44604 v.AddArg(v0) 44605 v.AddArg(mem) 44606 return true 44607 } 44608 // match: (Move [2] dst src mem) 44609 // cond: 44610 // result: (MOVWstore dst (MOVWload src mem) mem) 44611 for { 44612 if v.AuxInt != 2 { 44613 break 44614 } 44615 _ = v.Args[2] 44616 dst := v.Args[0] 44617 src := v.Args[1] 44618 mem := v.Args[2] 44619 v.reset(OpAMD64MOVWstore) 44620 v.AddArg(dst) 44621 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 44622 v0.AddArg(src) 44623 v0.AddArg(mem) 44624 v.AddArg(v0) 44625 v.AddArg(mem) 44626 return true 44627 } 44628 // match: (Move [4] dst src mem) 44629 // cond: 44630 // result: (MOVLstore dst (MOVLload src mem) mem) 44631 for { 44632 if v.AuxInt != 4 { 44633 break 44634 } 44635 _ = v.Args[2] 44636 dst := v.Args[0] 44637 src := v.Args[1] 44638 mem := v.Args[2] 44639 v.reset(OpAMD64MOVLstore) 44640 v.AddArg(dst) 44641 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44642 v0.AddArg(src) 44643 v0.AddArg(mem) 44644 v.AddArg(v0) 44645 v.AddArg(mem) 44646 return true 44647 } 44648 // match: (Move [8] dst src mem) 44649 // cond: 44650 // result: (MOVQstore dst (MOVQload src mem) mem) 44651 for { 44652 if v.AuxInt != 8 { 44653 break 44654 } 44655 _ = v.Args[2] 44656 dst := v.Args[0] 44657 src := v.Args[1] 44658 mem := v.Args[2] 44659 v.reset(OpAMD64MOVQstore) 44660 v.AddArg(dst) 44661 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44662 v0.AddArg(src) 44663 v0.AddArg(mem) 44664 v.AddArg(v0) 44665 v.AddArg(mem) 44666 return true 44667 } 44668 // match: (Move [16] dst src mem) 44669 // cond: config.useSSE 44670 // result: (MOVOstore dst (MOVOload src mem) mem) 44671 for { 44672 if v.AuxInt != 16 { 44673 break 44674 } 44675 _ = v.Args[2] 44676 dst := v.Args[0] 44677 src := v.Args[1] 44678 mem := v.Args[2] 44679 if !(config.useSSE) { 44680 break 44681 } 44682 v.reset(OpAMD64MOVOstore) 44683 v.AddArg(dst) 44684 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 44685 v0.AddArg(src) 44686 v0.AddArg(mem) 44687 v.AddArg(v0) 44688 v.AddArg(mem) 44689 return true 44690 } 44691 // match: (Move [16] dst src mem) 44692 // cond: !config.useSSE 44693 // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 44694 for { 44695 if 
v.AuxInt != 16 { 44696 break 44697 } 44698 _ = v.Args[2] 44699 dst := v.Args[0] 44700 src := v.Args[1] 44701 mem := v.Args[2] 44702 if !(!config.useSSE) { 44703 break 44704 } 44705 v.reset(OpAMD64MOVQstore) 44706 v.AuxInt = 8 44707 v.AddArg(dst) 44708 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44709 v0.AuxInt = 8 44710 v0.AddArg(src) 44711 v0.AddArg(mem) 44712 v.AddArg(v0) 44713 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 44714 v1.AddArg(dst) 44715 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44716 v2.AddArg(src) 44717 v2.AddArg(mem) 44718 v1.AddArg(v2) 44719 v1.AddArg(mem) 44720 v.AddArg(v1) 44721 return true 44722 } 44723 // match: (Move [3] dst src mem) 44724 // cond: 44725 // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) 44726 for { 44727 if v.AuxInt != 3 { 44728 break 44729 } 44730 _ = v.Args[2] 44731 dst := v.Args[0] 44732 src := v.Args[1] 44733 mem := v.Args[2] 44734 v.reset(OpAMD64MOVBstore) 44735 v.AuxInt = 2 44736 v.AddArg(dst) 44737 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 44738 v0.AuxInt = 2 44739 v0.AddArg(src) 44740 v0.AddArg(mem) 44741 v.AddArg(v0) 44742 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) 44743 v1.AddArg(dst) 44744 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 44745 v2.AddArg(src) 44746 v2.AddArg(mem) 44747 v1.AddArg(v2) 44748 v1.AddArg(mem) 44749 v.AddArg(v1) 44750 return true 44751 } 44752 // match: (Move [5] dst src mem) 44753 // cond: 44754 // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 44755 for { 44756 if v.AuxInt != 5 { 44757 break 44758 } 44759 _ = v.Args[2] 44760 dst := v.Args[0] 44761 src := v.Args[1] 44762 mem := v.Args[2] 44763 v.reset(OpAMD64MOVBstore) 44764 v.AuxInt = 4 44765 v.AddArg(dst) 44766 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) 44767 v0.AuxInt = 4 44768 v0.AddArg(src) 44769 v0.AddArg(mem) 44770 v.AddArg(v0) 44771 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 44772 v1.AddArg(dst) 44773 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44774 v2.AddArg(src) 44775 v2.AddArg(mem) 44776 v1.AddArg(v2) 44777 v1.AddArg(mem) 44778 v.AddArg(v1) 44779 return true 44780 } 44781 // match: (Move [6] dst src mem) 44782 // cond: 44783 // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) 44784 for { 44785 if v.AuxInt != 6 { 44786 break 44787 } 44788 _ = v.Args[2] 44789 dst := v.Args[0] 44790 src := v.Args[1] 44791 mem := v.Args[2] 44792 v.reset(OpAMD64MOVWstore) 44793 v.AuxInt = 4 44794 v.AddArg(dst) 44795 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) 44796 v0.AuxInt = 4 44797 v0.AddArg(src) 44798 v0.AddArg(mem) 44799 v.AddArg(v0) 44800 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 44801 v1.AddArg(dst) 44802 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44803 v2.AddArg(src) 44804 v2.AddArg(mem) 44805 v1.AddArg(v2) 44806 v1.AddArg(mem) 44807 v.AddArg(v1) 44808 return true 44809 } 44810 return false 44811 } 44812 func rewriteValueAMD64_OpMove_10(v *Value) bool { 44813 b := v.Block 44814 _ = b 44815 config := b.Func.Config 44816 _ = config 44817 typ := &b.Func.Config.Types 44818 _ = typ 44819 // match: (Move [7] dst src mem) 44820 // cond: 44821 // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) 44822 for { 44823 if v.AuxInt != 7 { 44824 break 44825 } 44826 _ = v.Args[2] 44827 dst := v.Args[0] 44828 src := v.Args[1] 44829 mem := v.Args[2] 44830 v.reset(OpAMD64MOVLstore) 
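// Small moves become straight-line code: 1/2/4/8/16 bytes are a single
// load/store pair, 3/5/6 stack a narrow copy next to a wider one, and the
// [7] rule being built here — like the 9..15-byte MOVQ rule that follows —
// issues two same-width copies that overlap in the middle rather than
// falling back to narrower ops. Its shape in slice terms, inner store first
// to match the memory chain:
//
//	func move7(dst, src []byte) {
//		copy(dst[0:4], src[0:4]) // MOVLstore dst (MOVLload src mem) mem
//		copy(dst[3:7], src[3:7]) // MOVLstore [3] dst (MOVLload [3] src mem) ...
//	}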
44831 v.AuxInt = 3 44832 v.AddArg(dst) 44833 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44834 v0.AuxInt = 3 44835 v0.AddArg(src) 44836 v0.AddArg(mem) 44837 v.AddArg(v0) 44838 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) 44839 v1.AddArg(dst) 44840 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) 44841 v2.AddArg(src) 44842 v2.AddArg(mem) 44843 v1.AddArg(v2) 44844 v1.AddArg(mem) 44845 v.AddArg(v1) 44846 return true 44847 } 44848 // match: (Move [s] dst src mem) 44849 // cond: s > 8 && s < 16 44850 // result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) 44851 for { 44852 s := v.AuxInt 44853 _ = v.Args[2] 44854 dst := v.Args[0] 44855 src := v.Args[1] 44856 mem := v.Args[2] 44857 if !(s > 8 && s < 16) { 44858 break 44859 } 44860 v.reset(OpAMD64MOVQstore) 44861 v.AuxInt = s - 8 44862 v.AddArg(dst) 44863 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44864 v0.AuxInt = s - 8 44865 v0.AddArg(src) 44866 v0.AddArg(mem) 44867 v.AddArg(v0) 44868 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 44869 v1.AddArg(dst) 44870 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44871 v2.AddArg(src) 44872 v2.AddArg(mem) 44873 v1.AddArg(v2) 44874 v1.AddArg(mem) 44875 v.AddArg(v1) 44876 return true 44877 } 44878 // match: (Move [s] dst src mem) 44879 // cond: s > 16 && s%16 != 0 && s%16 <= 8 44880 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) 44881 for { 44882 s := v.AuxInt 44883 _ = v.Args[2] 44884 dst := v.Args[0] 44885 src := v.Args[1] 44886 mem := v.Args[2] 44887 if !(s > 16 && s%16 != 0 && s%16 <= 8) { 44888 break 44889 } 44890 v.reset(OpMove) 44891 v.AuxInt = s - s%16 44892 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 44893 v0.AuxInt = s % 16 44894 v0.AddArg(dst) 44895 v.AddArg(v0) 44896 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 44897 v1.AuxInt = s % 16 44898 v1.AddArg(src) 44899 v.AddArg(v1) 44900 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) 44901 v2.AddArg(dst) 44902 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) 44903 v3.AddArg(src) 44904 v3.AddArg(mem) 44905 v2.AddArg(v3) 44906 v2.AddArg(mem) 44907 v.AddArg(v2) 44908 return true 44909 } 44910 // match: (Move [s] dst src mem) 44911 // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE 44912 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) 44913 for { 44914 s := v.AuxInt 44915 _ = v.Args[2] 44916 dst := v.Args[0] 44917 src := v.Args[1] 44918 mem := v.Args[2] 44919 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) { 44920 break 44921 } 44922 v.reset(OpMove) 44923 v.AuxInt = s - s%16 44924 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) 44925 v0.AuxInt = s % 16 44926 v0.AddArg(dst) 44927 v.AddArg(v0) 44928 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) 44929 v1.AuxInt = s % 16 44930 v1.AddArg(src) 44931 v.AddArg(v1) 44932 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 44933 v2.AddArg(dst) 44934 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) 44935 v3.AddArg(src) 44936 v3.AddArg(mem) 44937 v2.AddArg(v3) 44938 v2.AddArg(mem) 44939 v.AddArg(v2) 44940 return true 44941 } 44942 // match: (Move [s] dst src mem) 44943 // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE 44944 // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))) 44945 for { 44946 s 
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = s % 16
		v0.AddArg(dst)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = s % 16
		v1.AddArg(src)
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = 8
		v2.AddArg(dst)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = 8
		v3.AddArg(src)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v4.AddArg(dst)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg(src)
		v5.AddArg(mem)
		v4.AddArg(v5)
		v4.AddArg(mem)
		v2.AddArg(v4)
		v.AddArg(v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !(s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = 14 * (64 - s/16)
		v.AddArg(dst)
		v.AddArg(src)
		v.AddArg(mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := v.AuxInt
		_ = v.Args[2]
		dst := v.Args[0]
		src := v.Args[1]
		mem := v.Args[2]
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v.AddArg(dst)
		v.AddArg(src)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = s / 8
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
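// Editorial note: DUFFCOPY's AuxInt above is an entry offset into the
// duffcopy routine, which (on this amd64 toolchain) is 64 unrolled 16-byte
// copy units of 14 code bytes each; entering at 14*(64-s/16) runs exactly
// s/16 units. Moves that are too large, or builds with Duff's device
// disabled, fall through to REP MOVSQ, which moves s/8 quadwords.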
func rewriteValueAMD64_OpMul16_0(v *Value) bool {
	// match: (Mul16 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32_0(v *Value) bool {
	// match: (Mul32 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
	// match: (Mul32F x y)
	// cond:
	// result: (MULSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64_0(v *Value) bool {
	// match: (Mul64 x y)
	// cond:
	// result: (MULQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
	// match: (Mul64F x y)
	// cond:
	// result: (MULSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
	// match: (Mul64uhilo x y)
	// cond:
	// result: (MULQU2 x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULQU2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpMul8_0(v *Value) bool {
	// match: (Mul8 x y)
	// cond:
	// result: (MULL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64MULL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
	// match: (Neg16 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
	// match: (Neg32 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
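// Editorial note: Neg32F/Neg64F below negate by flipping the sign bit
// rather than subtracting from zero. PXOR with the constant -0.0 (only the
// sign bit set, f2i(math.Copysign(0, -1))) negates the value and, unlike
// 0-x, preserves the sign of zero and leaves NaN payloads alone.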
func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg32F x)
	// cond:
	// result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
	// match: (Neg64 x)
	// cond:
	// result: (NEGQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (Neg64F x)
	// cond:
	// result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
	for {
		x := v.Args[0]
		v.reset(OpAMD64PXOR)
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = f2i(math.Copysign(0, -1))
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
	// match: (Neg8 x)
	// cond:
	// result: (NEGL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64NEGL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq16 x y)
	// cond:
	// result: (SETNE (CMPW x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32 x y)
	// cond:
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq32F x y)
	// cond:
	// result: (SETNEF (UCOMISS x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64 x y)
	// cond:
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq64F x y)
	// cond:
	// result: (SETNEF (UCOMISD x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Neq8 x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (NeqB x y)
	// cond:
	// result: (SETNE (CMPB x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 8
	// result: (SETNE (CMPQ x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NeqPtr x y)
	// cond: config.PtrSize == 4
	// result: (SETNE (CMPL x y))
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
	// match: (NilCheck ptr mem)
	// cond:
	// result: (LoweredNilCheck ptr mem)
	for {
		_ = v.Args[1]
		ptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64LoweredNilCheck)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpNot_0(v *Value) bool {
	// match: (Not x)
	// cond:
	// result: (XORLconst [1] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = 1
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	typ := &b.Func.Config.Types
	_ = typ
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8 && is32Bit(off)
	// result: (ADDQconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8 && is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 8
	// result: (ADDQ (MOVQconst [off]) ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = off
		v.AddArg(v0)
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// cond: config.PtrSize == 4
	// result: (ADDLconst [off] ptr)
	for {
		off := v.AuxInt
		ptr := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = off
		v.AddArg(ptr)
		return true
	}
	return false
}
func rewriteValueAMD64_OpOr16_0(v *Value) bool {
	// match: (Or16 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr32_0(v *Value) bool {
	// match: (Or32 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr64_0(v *Value) bool {
	// match: (Or64 x y)
	// cond:
	// result: (ORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOr8_0(v *Value) bool {
	// match: (Or8 x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpOrB_0(v *Value) bool {
	// match: (OrB x y)
	// cond:
	// result: (ORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
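// Editorial note: the PopCount16/PopCount8 rules below zero-extend the
// operand into a 32-bit register first. POPCNT has no 8-bit form, and
// extending to typ.UInt32 lets a single POPCNTL count just the original
// bits (the extension guarantees the upper bits are zero); presumably this
// also sidesteps 16-bit operand-size encodings.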
func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount16 x)
	// cond:
	// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
	// match: (PopCount32 x)
	// cond:
	// result: (POPCNTL x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
	// match: (PopCount64 x)
	// cond:
	// result: (POPCNTQ x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTQ)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
	b := v.Block
	_ = b
	typ := &b.Func.Config.Types
	_ = typ
	// match: (PopCount8 x)
	// cond:
	// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
	for {
		x := v.Args[0]
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
	// match: (Round32F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
	// match: (Round64F x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool {
	// match: (RoundToEven x)
	// cond:
	// result: (ROUNDSD [0] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 0
		v.AddArg(x)
		return true
	}
}
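// Editorial note: the unsigned Rsh*Ux* rules below implement Go's shift
// semantics (a count >= the operand width yields 0) without a branch.
// SBB{L,Q}carrymask materializes all ones when the CMP*const leaves the
// carry flag set (y < width, unsigned) and all zeros otherwise; ANDing the
// mask with the hardware shift result forces out-of-range shifts to zero.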
func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 16
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
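// Editorial note: the signed Rsh*x* rules below saturate the shift count
// instead of masking the result: ORing y with NOT(SBBcarrymask(CMP y
// width)) turns any out-of-range count into all ones. The hardware masks
// the count to the register width minus one (31 or 63), so an all-ones
// count shifts in nothing but sign bits, giving the 0 or -1 Go requires.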
func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x16 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x32 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x64 <t> x y)
	// cond:
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh16x8 <t> x y)
	// cond:
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARW)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 16
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x16 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x32 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x64 <t> x y)
	// cond:
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh32x8 <t> x y)
	// cond:
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARL)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 32
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux16 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux32 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux64 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64Ux8 <t> x y)
	// cond:
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x16 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x32 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x64 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh64x8 <t> x y)
	// cond:
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARQ)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 64
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux16 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux32 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux64 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8Ux8 <t> x y)
	// cond:
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 8
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x16 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x32 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x64 <t> x y)
	// cond:
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Rsh8x8 <t> x y)
	// cond:
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SARB)
		v.Type = t
		v.AddArg(x)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v0.AddArg(y)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = 8
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
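// Editorial note: AddTupleFirst{32,64} is a pseudo-op meaning "add val to
// the first element of tuple" (it arises when lowering atomic adds). The
// Select0 rules below materialize the deferred add where its result is
// used, while Select1 simply forwards the tuple's untouched second element,
// typically the memory state.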
func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// cond:
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDL)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// cond:
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		val := v_0.Args[0]
		tuple := v_0.Args[1]
		v.reset(OpAMD64ADDQ)
		v.AddArg(val)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// cond:
	// result: (Select1 tuple)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		_ = v_0.Args[1]
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool {
	// match: (SignExt16to32 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool {
	// match: (SignExt16to64 x)
	// cond:
	// result: (MOVWQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool {
	// match: (SignExt32to64 x)
	// cond:
	// result: (MOVLQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool {
	// match: (SignExt8to16 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool {
	// match: (SignExt8to32 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool {
	// match: (SignExt8to64 x)
	// cond:
	// result: (MOVBQSX x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
}
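// Editorial note: Slicemask must yield all ones for any x > 0 and all
// zeros for x == 0. NEGQ makes the sign bit of -x track "x != 0" (for
// positive x, -x is negative), and the arithmetic right shift by 63 then
// smears that single bit across the whole word.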
func rewriteValueAMD64_OpSlicemask_0(v *Value) bool {
	b := v.Block
	_ = b
	// match: (Slicemask <t> x)
	// cond:
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v.Args[0]
		v.reset(OpAMD64SARQconst)
		v.AuxInt = 63
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSqrt_0(v *Value) bool {
	// match: (Sqrt x)
	// cond:
	// result: (SQRTSD x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64SQRTSD)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
	// match: (StaticCall [argwid] {target} mem)
	// cond:
	// result: (CALLstatic [argwid] {target} mem)
	for {
		argwid := v.AuxInt
		target := v.Aux
		mem := v.Args[0]
		v.reset(OpAMD64CALLstatic)
		v.AuxInt = argwid
		v.Aux = target
		v.AddArg(mem)
		return true
	}
}
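// Editorial note: the generic Store op carries no size of its own; the
// rules below dispatch on the size of the stored type, with the floating
// point cases checked first so that an 8- or 4-byte float value takes the
// SSE store (MOVSDstore/MOVSSstore) rather than the integer one of the
// same width.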
func rewriteValueAMD64_OpStore_0(v *Value) bool {
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.(*types.Type).Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		val := v.Args[1]
		mem := v.Args[2]
		if !(t.(*types.Type).Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg(ptr)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSub16_0(v *Value) bool {
	// match: (Sub16 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32_0(v *Value) bool {
	// match: (Sub32 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub32F_0(v *Value) bool {
	// match: (Sub32F x y)
	// cond:
	// result: (SUBSS x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSS)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64_0(v *Value) bool {
	// match: (Sub64 x y)
	// cond:
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub64F_0(v *Value) bool {
	// match: (Sub64F x y)
	// cond:
	// result: (SUBSD x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBSD)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSub8_0(v *Value) bool {
	// match: (Sub8 x y)
	// cond:
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SubPtr x y)
	// cond: config.PtrSize == 8
	// result: (SUBQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	// match: (SubPtr x y)
	// cond: config.PtrSize == 4
	// result: (SUBL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
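// Editorial note: ROUNDSD's immediate selects the SSE4.1 rounding mode:
// 0 rounds to nearest even (RoundToEven above is ROUNDSD [0]) and 3
// truncates toward zero, which is what Trunc below uses. Floor and Ceil,
// outside this section, use modes 1 and 2.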
func rewriteValueAMD64_OpTrunc_0(v *Value) bool {
	// match: (Trunc x)
	// cond:
	// result: (ROUNDSD [3] x)
	for {
		x := v.Args[0]
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = 3
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool {
	// match: (Trunc16to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool {
	// match: (Trunc32to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool {
	// match: (Trunc32to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool {
	// match: (Trunc64to16 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool {
	// match: (Trunc64to32 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool {
	// match: (Trunc64to8 x)
	// cond:
	// result: x
	for {
		x := v.Args[0]
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpWB_0(v *Value) bool {
	// match: (WB {fn} destptr srcptr mem)
	// cond:
	// result: (LoweredWB {fn} destptr srcptr mem)
	for {
		fn := v.Aux
		_ = v.Args[2]
		destptr := v.Args[0]
		srcptr := v.Args[1]
		mem := v.Args[2]
		v.reset(OpAMD64LoweredWB)
		v.Aux = fn
		v.AddArg(destptr)
		v.AddArg(srcptr)
		v.AddArg(mem)
		return true
	}
}
func rewriteValueAMD64_OpXor16_0(v *Value) bool {
	// match: (Xor16 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor32_0(v *Value) bool {
	// match: (Xor32 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor64_0(v *Value) bool {
	// match: (Xor64 x y)
	// cond:
	// result: (XORQ x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
func rewriteValueAMD64_OpXor8_0(v *Value) bool {
	// match: (Xor8 x y)
	// cond:
	// result: (XORL x y)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		y := v.Args[1]
		v.reset(OpAMD64XORL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
}
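// Editorial note: the small-size Zero rules below store immediate zeros
// directly. For the two-store forms the AuxInt is a ValAndOff, a packed
// (value, offset) pair, so makeValAndOff(0, 2) means "store constant 0 at
// offset 2". Odd sizes reuse the same overlapping trick as Move: Zero [7]
// issues two 4-byte zero stores at offsets 0 and 3.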
func rewriteValueAMD64_OpZero_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [0] _ mem)
	// cond:
	// result: mem
	for {
		if v.AuxInt != 0 {
			break
		}
		_ = v.Args[1]
		mem := v.Args[1]
		v.reset(OpCopy)
		v.Type = mem.Type
		v.AddArg(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// cond:
	// result: (MOVBstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 1 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// cond:
	// result: (MOVWstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 2 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// cond:
	// result: (MOVLstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 4 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// cond:
	// result: (MOVQstoreconst [0] destptr mem)
	for {
		if v.AuxInt != 8 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = 0
		v.AddArg(destptr)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 3 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 2)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// cond:
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 5 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// cond:
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 6 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = makeValAndOff(0, 4)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// cond:
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 7 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(0, 3)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%8
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 8
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	return false
}
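// Editorial note: the next batch of Zero rules splits on config.useSSE.
// Without SSE, 16-, 24- and 32-byte zeroings become chains of
// MOVQstoreconst; with SSE, MOVOconst [0] is a zeroed 128-bit register and
// each MOVOstore clears 16 bytes, so Zero [64] is just four stores with no
// loop.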
func rewriteValueAMD64_OpZero_10(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
	for {
		if v.AuxInt != 24 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 16)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, 24)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = makeValAndOff(0, 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = makeValAndOff(0, 8)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = 0
		v2.AddArg(destptr)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = makeValAndOff(0, s-8)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = 0
		v0.AddArg(destptr)
		v0.AddArg(mem)
		v.AddArg(v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v1.AddArg(destptr)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v2.AuxInt = 0
		v1.AddArg(v2)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
	for {
		s := v.AuxInt
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = s - s%16
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = s % 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = 0
		v1.AddArg(destptr)
		v1.AddArg(mem)
		v.AddArg(v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore destptr (MOVOconst [0]) mem)
	for {
		if v.AuxInt != 16 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AddArg(destptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
	for {
		if v.AuxInt != 32 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 16
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v2.AddArg(destptr)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v3.AuxInt = 0
		v2.AddArg(v3)
		v2.AddArg(mem)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
	for {
		if v.AuxInt != 48 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 32
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 16
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v5.AddArg(destptr)
		v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v6.AuxInt = 0
		v5.AddArg(v6)
		v5.AddArg(mem)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
	for {
		if v.AuxInt != 64 {
			break
		}
		_ = v.Args[1]
		destptr := v.Args[0]
		mem := v.Args[1]
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = 48
		v0.AddArg(destptr)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v1.AuxInt = 0
		v.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v3.AuxInt = 32
		v3.AddArg(destptr)
		v2.AddArg(v3)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v4.AuxInt = 0
		v2.AddArg(v4)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v6.AuxInt = 16
		v6.AddArg(destptr)
		v5.AddArg(v6)
		v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v7.AuxInt = 0
		v5.AddArg(v7)
		v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v8.AddArg(destptr)
		v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
		v9.AuxInt = 0
		v8.AddArg(v9)
		v8.AddArg(mem)
		v5.AddArg(v8)
		v2.AddArg(v5)
		v.AddArg(v2)
		return true
	}
	return false
}
b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 47448 v1.AuxInt = 0 47449 v.AddArg(v1) 47450 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 47451 v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 47452 v3.AuxInt = 16 47453 v3.AddArg(destptr) 47454 v2.AddArg(v3) 47455 v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 47456 v4.AuxInt = 0 47457 v2.AddArg(v4) 47458 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 47459 v5.AddArg(destptr) 47460 v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 47461 v6.AuxInt = 0 47462 v5.AddArg(v6) 47463 v5.AddArg(mem) 47464 v2.AddArg(v5) 47465 v.AddArg(v2) 47466 return true 47467 } 47468 // match: (Zero [64] destptr mem) 47469 // cond: config.useSSE 47470 // result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))) 47471 for { 47472 if v.AuxInt != 64 { 47473 break 47474 } 47475 _ = v.Args[1] 47476 destptr := v.Args[0] 47477 mem := v.Args[1] 47478 if !(config.useSSE) { 47479 break 47480 } 47481 v.reset(OpAMD64MOVOstore) 47482 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 47483 v0.AuxInt = 48 47484 v0.AddArg(destptr) 47485 v.AddArg(v0) 47486 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 47487 v1.AuxInt = 0 47488 v.AddArg(v1) 47489 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 47490 v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 47491 v3.AuxInt = 32 47492 v3.AddArg(destptr) 47493 v2.AddArg(v3) 47494 v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 47495 v4.AuxInt = 0 47496 v2.AddArg(v4) 47497 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 47498 v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) 47499 v6.AuxInt = 16 47500 v6.AddArg(destptr) 47501 v5.AddArg(v6) 47502 v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 47503 v7.AuxInt = 0 47504 v5.AddArg(v7) 47505 v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) 47506 v8.AddArg(destptr) 47507 v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 47508 v9.AuxInt = 0 47509 v8.AddArg(v9) 47510 v8.AddArg(mem) 47511 v5.AddArg(v8) 47512 v2.AddArg(v5) 47513 v.AddArg(v2) 47514 return true 47515 } 47516 return false 47517 } 47518 func rewriteValueAMD64_OpZero_20(v *Value) bool { 47519 b := v.Block 47520 _ = b 47521 config := b.Func.Config 47522 _ = config 47523 typ := &b.Func.Config.Types 47524 _ = typ 47525 // match: (Zero [s] destptr mem) 47526 // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice 47527 // result: (DUFFZERO [s] destptr (MOVOconst [0]) mem) 47528 for { 47529 s := v.AuxInt 47530 _ = v.Args[1] 47531 destptr := v.Args[0] 47532 mem := v.Args[1] 47533 if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) { 47534 break 47535 } 47536 v.reset(OpAMD64DUFFZERO) 47537 v.AuxInt = s 47538 v.AddArg(destptr) 47539 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) 47540 v0.AuxInt = 0 47541 v.AddArg(v0) 47542 v.AddArg(mem) 47543 return true 47544 } 47545 // match: (Zero [s] destptr mem) 47546 // cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0 47547 // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) 47548 for { 47549 s := v.AuxInt 47550 _ = v.Args[1] 47551 destptr := v.Args[0] 47552 mem := v.Args[1] 47553 if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) { 47554 break 47555 } 47556 
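// The guard just above routes only the cases no inline sequence or
// Duff's device handles well: very large blocks (s > 1024), blocks over
// 64 bytes when Duff's device is disabled, or blocks over 32 bytes
// without SSE. REPSTOSQ expands to REP STOSQ, so the two MOVQconst
// values built below become the quadword count (s/8) and the zero that
// the string instruction stores repeatedly.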
v.reset(OpAMD64REPSTOSQ) 47557 v.AddArg(destptr) 47558 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 47559 v0.AuxInt = s / 8 47560 v.AddArg(v0) 47561 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) 47562 v1.AuxInt = 0 47563 v.AddArg(v1) 47564 v.AddArg(mem) 47565 return true 47566 } 47567 return false 47568 } 47569 func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool { 47570 // match: (ZeroExt16to32 x) 47571 // cond: 47572 // result: (MOVWQZX x) 47573 for { 47574 x := v.Args[0] 47575 v.reset(OpAMD64MOVWQZX) 47576 v.AddArg(x) 47577 return true 47578 } 47579 } 47580 func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool { 47581 // match: (ZeroExt16to64 x) 47582 // cond: 47583 // result: (MOVWQZX x) 47584 for { 47585 x := v.Args[0] 47586 v.reset(OpAMD64MOVWQZX) 47587 v.AddArg(x) 47588 return true 47589 } 47590 } 47591 func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool { 47592 // match: (ZeroExt32to64 x) 47593 // cond: 47594 // result: (MOVLQZX x) 47595 for { 47596 x := v.Args[0] 47597 v.reset(OpAMD64MOVLQZX) 47598 v.AddArg(x) 47599 return true 47600 } 47601 } 47602 func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool { 47603 // match: (ZeroExt8to16 x) 47604 // cond: 47605 // result: (MOVBQZX x) 47606 for { 47607 x := v.Args[0] 47608 v.reset(OpAMD64MOVBQZX) 47609 v.AddArg(x) 47610 return true 47611 } 47612 } 47613 func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool { 47614 // match: (ZeroExt8to32 x) 47615 // cond: 47616 // result: (MOVBQZX x) 47617 for { 47618 x := v.Args[0] 47619 v.reset(OpAMD64MOVBQZX) 47620 v.AddArg(x) 47621 return true 47622 } 47623 } 47624 func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool { 47625 // match: (ZeroExt8to64 x) 47626 // cond: 47627 // result: (MOVBQZX x) 47628 for { 47629 x := v.Args[0] 47630 v.reset(OpAMD64MOVBQZX) 47631 v.AddArg(x) 47632 return true 47633 } 47634 } 47635 func rewriteBlockAMD64(b *Block) bool { 47636 config := b.Func.Config 47637 _ = config 47638 fe := b.Func.fe 47639 _ = fe 47640 typ := &config.Types 47641 _ = typ 47642 switch b.Kind { 47643 case BlockAMD64EQ: 47644 // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y)) 47645 // cond: !config.nacl 47646 // result: (UGE (BTL x y)) 47647 for { 47648 v := b.Control 47649 if v.Op != OpAMD64TESTL { 47650 break 47651 } 47652 _ = v.Args[1] 47653 v_0 := v.Args[0] 47654 if v_0.Op != OpAMD64SHLL { 47655 break 47656 } 47657 _ = v_0.Args[1] 47658 v_0_0 := v_0.Args[0] 47659 if v_0_0.Op != OpAMD64MOVLconst { 47660 break 47661 } 47662 if v_0_0.AuxInt != 1 { 47663 break 47664 } 47665 x := v_0.Args[1] 47666 y := v.Args[1] 47667 if !(!config.nacl) { 47668 break 47669 } 47670 b.Kind = BlockAMD64UGE 47671 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 47672 v0.AddArg(x) 47673 v0.AddArg(y) 47674 b.SetControl(v0) 47675 b.Aux = nil 47676 return true 47677 } 47678 // match: (EQ (TESTL y (SHLL (MOVLconst [1]) x))) 47679 // cond: !config.nacl 47680 // result: (UGE (BTL x y)) 47681 for { 47682 v := b.Control 47683 if v.Op != OpAMD64TESTL { 47684 break 47685 } 47686 _ = v.Args[1] 47687 y := v.Args[0] 47688 v_1 := v.Args[1] 47689 if v_1.Op != OpAMD64SHLL { 47690 break 47691 } 47692 _ = v_1.Args[1] 47693 v_1_0 := v_1.Args[0] 47694 if v_1_0.Op != OpAMD64MOVLconst { 47695 break 47696 } 47697 if v_1_0.AuxInt != 1 { 47698 break 47699 } 47700 x := v_1.Args[1] 47701 if !(!config.nacl) { 47702 break 47703 } 47704 b.Kind = BlockAMD64UGE 47705 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 47706 v0.AddArg(x) 47707 v0.AddArg(y) 47708 b.SetControl(v0) 47709 b.Aux = nil 47710 return true 
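// Note the sense change in the rule above: TESTL against a single-bit
// mask sets ZF when the bit is clear, so the EQ block tests "bit == 0".
// BTL instead copies the selected bit into CF, and CF == 0 is exactly
// the UGE (carry-clear) condition, which is why every EQ-of-TEST rule
// in this switch rewrites to a UGE block around a BT.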
47711 } 47712 // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) 47713 // cond: !config.nacl 47714 // result: (UGE (BTQ x y)) 47715 for { 47716 v := b.Control 47717 if v.Op != OpAMD64TESTQ { 47718 break 47719 } 47720 _ = v.Args[1] 47721 v_0 := v.Args[0] 47722 if v_0.Op != OpAMD64SHLQ { 47723 break 47724 } 47725 _ = v_0.Args[1] 47726 v_0_0 := v_0.Args[0] 47727 if v_0_0.Op != OpAMD64MOVQconst { 47728 break 47729 } 47730 if v_0_0.AuxInt != 1 { 47731 break 47732 } 47733 x := v_0.Args[1] 47734 y := v.Args[1] 47735 if !(!config.nacl) { 47736 break 47737 } 47738 b.Kind = BlockAMD64UGE 47739 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 47740 v0.AddArg(x) 47741 v0.AddArg(y) 47742 b.SetControl(v0) 47743 b.Aux = nil 47744 return true 47745 } 47746 // match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x))) 47747 // cond: !config.nacl 47748 // result: (UGE (BTQ x y)) 47749 for { 47750 v := b.Control 47751 if v.Op != OpAMD64TESTQ { 47752 break 47753 } 47754 _ = v.Args[1] 47755 y := v.Args[0] 47756 v_1 := v.Args[1] 47757 if v_1.Op != OpAMD64SHLQ { 47758 break 47759 } 47760 _ = v_1.Args[1] 47761 v_1_0 := v_1.Args[0] 47762 if v_1_0.Op != OpAMD64MOVQconst { 47763 break 47764 } 47765 if v_1_0.AuxInt != 1 { 47766 break 47767 } 47768 x := v_1.Args[1] 47769 if !(!config.nacl) { 47770 break 47771 } 47772 b.Kind = BlockAMD64UGE 47773 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 47774 v0.AddArg(x) 47775 v0.AddArg(y) 47776 b.SetControl(v0) 47777 b.Aux = nil 47778 return true 47779 } 47780 // match: (EQ (TESTLconst [c] x)) 47781 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 47782 // result: (UGE (BTLconst [log2(c)] x)) 47783 for { 47784 v := b.Control 47785 if v.Op != OpAMD64TESTLconst { 47786 break 47787 } 47788 c := v.AuxInt 47789 x := v.Args[0] 47790 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 47791 break 47792 } 47793 b.Kind = BlockAMD64UGE 47794 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 47795 v0.AuxInt = log2(c) 47796 v0.AddArg(x) 47797 b.SetControl(v0) 47798 b.Aux = nil 47799 return true 47800 } 47801 // match: (EQ (TESTQconst [c] x)) 47802 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 47803 // result: (UGE (BTQconst [log2(c)] x)) 47804 for { 47805 v := b.Control 47806 if v.Op != OpAMD64TESTQconst { 47807 break 47808 } 47809 c := v.AuxInt 47810 x := v.Args[0] 47811 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 47812 break 47813 } 47814 b.Kind = BlockAMD64UGE 47815 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 47816 v0.AuxInt = log2(c) 47817 v0.AddArg(x) 47818 b.SetControl(v0) 47819 b.Aux = nil 47820 return true 47821 } 47822 // match: (EQ (TESTQ (MOVQconst [c]) x)) 47823 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 47824 // result: (UGE (BTQconst [log2(c)] x)) 47825 for { 47826 v := b.Control 47827 if v.Op != OpAMD64TESTQ { 47828 break 47829 } 47830 _ = v.Args[1] 47831 v_0 := v.Args[0] 47832 if v_0.Op != OpAMD64MOVQconst { 47833 break 47834 } 47835 c := v_0.AuxInt 47836 x := v.Args[1] 47837 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 47838 break 47839 } 47840 b.Kind = BlockAMD64UGE 47841 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 47842 v0.AuxInt = log2(c) 47843 v0.AddArg(x) 47844 b.SetControl(v0) 47845 b.Aux = nil 47846 return true 47847 } 47848 // match: (EQ (TESTQ x (MOVQconst [c]))) 47849 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 47850 // result: (UGE (BTQconst [log2(c)] x)) 47851 for { 47852 v := b.Control 47853 if v.Op != OpAMD64TESTQ { 47854 break 47855 } 47856 _ = 
v.Args[1] 47857 x := v.Args[0] 47858 v_1 := v.Args[1] 47859 if v_1.Op != OpAMD64MOVQconst { 47860 break 47861 } 47862 c := v_1.AuxInt 47863 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 47864 break 47865 } 47866 b.Kind = BlockAMD64UGE 47867 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 47868 v0.AuxInt = log2(c) 47869 v0.AddArg(x) 47870 b.SetControl(v0) 47871 b.Aux = nil 47872 return true 47873 } 47874 // match: (EQ (InvertFlags cmp) yes no) 47875 // cond: 47876 // result: (EQ cmp yes no) 47877 for { 47878 v := b.Control 47879 if v.Op != OpAMD64InvertFlags { 47880 break 47881 } 47882 cmp := v.Args[0] 47883 b.Kind = BlockAMD64EQ 47884 b.SetControl(cmp) 47885 b.Aux = nil 47886 return true 47887 } 47888 // match: (EQ (FlagEQ) yes no) 47889 // cond: 47890 // result: (First nil yes no) 47891 for { 47892 v := b.Control 47893 if v.Op != OpAMD64FlagEQ { 47894 break 47895 } 47896 b.Kind = BlockFirst 47897 b.SetControl(nil) 47898 b.Aux = nil 47899 return true 47900 } 47901 // match: (EQ (FlagLT_ULT) yes no) 47902 // cond: 47903 // result: (First nil no yes) 47904 for { 47905 v := b.Control 47906 if v.Op != OpAMD64FlagLT_ULT { 47907 break 47908 } 47909 b.Kind = BlockFirst 47910 b.SetControl(nil) 47911 b.Aux = nil 47912 b.swapSuccessors() 47913 return true 47914 } 47915 // match: (EQ (FlagLT_UGT) yes no) 47916 // cond: 47917 // result: (First nil no yes) 47918 for { 47919 v := b.Control 47920 if v.Op != OpAMD64FlagLT_UGT { 47921 break 47922 } 47923 b.Kind = BlockFirst 47924 b.SetControl(nil) 47925 b.Aux = nil 47926 b.swapSuccessors() 47927 return true 47928 } 47929 // match: (EQ (FlagGT_ULT) yes no) 47930 // cond: 47931 // result: (First nil no yes) 47932 for { 47933 v := b.Control 47934 if v.Op != OpAMD64FlagGT_ULT { 47935 break 47936 } 47937 b.Kind = BlockFirst 47938 b.SetControl(nil) 47939 b.Aux = nil 47940 b.swapSuccessors() 47941 return true 47942 } 47943 // match: (EQ (FlagGT_UGT) yes no) 47944 // cond: 47945 // result: (First nil no yes) 47946 for { 47947 v := b.Control 47948 if v.Op != OpAMD64FlagGT_UGT { 47949 break 47950 } 47951 b.Kind = BlockFirst 47952 b.SetControl(nil) 47953 b.Aux = nil 47954 b.swapSuccessors() 47955 return true 47956 } 47957 case BlockAMD64GE: 47958 // match: (GE (InvertFlags cmp) yes no) 47959 // cond: 47960 // result: (LE cmp yes no) 47961 for { 47962 v := b.Control 47963 if v.Op != OpAMD64InvertFlags { 47964 break 47965 } 47966 cmp := v.Args[0] 47967 b.Kind = BlockAMD64LE 47968 b.SetControl(cmp) 47969 b.Aux = nil 47970 return true 47971 } 47972 // match: (GE (FlagEQ) yes no) 47973 // cond: 47974 // result: (First nil yes no) 47975 for { 47976 v := b.Control 47977 if v.Op != OpAMD64FlagEQ { 47978 break 47979 } 47980 b.Kind = BlockFirst 47981 b.SetControl(nil) 47982 b.Aux = nil 47983 return true 47984 } 47985 // match: (GE (FlagLT_ULT) yes no) 47986 // cond: 47987 // result: (First nil no yes) 47988 for { 47989 v := b.Control 47990 if v.Op != OpAMD64FlagLT_ULT { 47991 break 47992 } 47993 b.Kind = BlockFirst 47994 b.SetControl(nil) 47995 b.Aux = nil 47996 b.swapSuccessors() 47997 return true 47998 } 47999 // match: (GE (FlagLT_UGT) yes no) 48000 // cond: 48001 // result: (First nil no yes) 48002 for { 48003 v := b.Control 48004 if v.Op != OpAMD64FlagLT_UGT { 48005 break 48006 } 48007 b.Kind = BlockFirst 48008 b.SetControl(nil) 48009 b.Aux = nil 48010 b.swapSuccessors() 48011 return true 48012 } 48013 // match: (GE (FlagGT_ULT) yes no) 48014 // cond: 48015 // result: (First nil yes no) 48016 for { 48017 v := b.Control 48018 if v.Op != 
OpAMD64FlagGT_ULT { 48019 break 48020 } 48021 b.Kind = BlockFirst 48022 b.SetControl(nil) 48023 b.Aux = nil 48024 return true 48025 } 48026 // match: (GE (FlagGT_UGT) yes no) 48027 // cond: 48028 // result: (First nil yes no) 48029 for { 48030 v := b.Control 48031 if v.Op != OpAMD64FlagGT_UGT { 48032 break 48033 } 48034 b.Kind = BlockFirst 48035 b.SetControl(nil) 48036 b.Aux = nil 48037 return true 48038 } 48039 case BlockAMD64GT: 48040 // match: (GT (InvertFlags cmp) yes no) 48041 // cond: 48042 // result: (LT cmp yes no) 48043 for { 48044 v := b.Control 48045 if v.Op != OpAMD64InvertFlags { 48046 break 48047 } 48048 cmp := v.Args[0] 48049 b.Kind = BlockAMD64LT 48050 b.SetControl(cmp) 48051 b.Aux = nil 48052 return true 48053 } 48054 // match: (GT (FlagEQ) yes no) 48055 // cond: 48056 // result: (First nil no yes) 48057 for { 48058 v := b.Control 48059 if v.Op != OpAMD64FlagEQ { 48060 break 48061 } 48062 b.Kind = BlockFirst 48063 b.SetControl(nil) 48064 b.Aux = nil 48065 b.swapSuccessors() 48066 return true 48067 } 48068 // match: (GT (FlagLT_ULT) yes no) 48069 // cond: 48070 // result: (First nil no yes) 48071 for { 48072 v := b.Control 48073 if v.Op != OpAMD64FlagLT_ULT { 48074 break 48075 } 48076 b.Kind = BlockFirst 48077 b.SetControl(nil) 48078 b.Aux = nil 48079 b.swapSuccessors() 48080 return true 48081 } 48082 // match: (GT (FlagLT_UGT) yes no) 48083 // cond: 48084 // result: (First nil no yes) 48085 for { 48086 v := b.Control 48087 if v.Op != OpAMD64FlagLT_UGT { 48088 break 48089 } 48090 b.Kind = BlockFirst 48091 b.SetControl(nil) 48092 b.Aux = nil 48093 b.swapSuccessors() 48094 return true 48095 } 48096 // match: (GT (FlagGT_ULT) yes no) 48097 // cond: 48098 // result: (First nil yes no) 48099 for { 48100 v := b.Control 48101 if v.Op != OpAMD64FlagGT_ULT { 48102 break 48103 } 48104 b.Kind = BlockFirst 48105 b.SetControl(nil) 48106 b.Aux = nil 48107 return true 48108 } 48109 // match: (GT (FlagGT_UGT) yes no) 48110 // cond: 48111 // result: (First nil yes no) 48112 for { 48113 v := b.Control 48114 if v.Op != OpAMD64FlagGT_UGT { 48115 break 48116 } 48117 b.Kind = BlockFirst 48118 b.SetControl(nil) 48119 b.Aux = nil 48120 return true 48121 } 48122 case BlockIf: 48123 // match: (If (SETL cmp) yes no) 48124 // cond: 48125 // result: (LT cmp yes no) 48126 for { 48127 v := b.Control 48128 if v.Op != OpAMD64SETL { 48129 break 48130 } 48131 cmp := v.Args[0] 48132 b.Kind = BlockAMD64LT 48133 b.SetControl(cmp) 48134 b.Aux = nil 48135 return true 48136 } 48137 // match: (If (SETLE cmp) yes no) 48138 // cond: 48139 // result: (LE cmp yes no) 48140 for { 48141 v := b.Control 48142 if v.Op != OpAMD64SETLE { 48143 break 48144 } 48145 cmp := v.Args[0] 48146 b.Kind = BlockAMD64LE 48147 b.SetControl(cmp) 48148 b.Aux = nil 48149 return true 48150 } 48151 // match: (If (SETG cmp) yes no) 48152 // cond: 48153 // result: (GT cmp yes no) 48154 for { 48155 v := b.Control 48156 if v.Op != OpAMD64SETG { 48157 break 48158 } 48159 cmp := v.Args[0] 48160 b.Kind = BlockAMD64GT 48161 b.SetControl(cmp) 48162 b.Aux = nil 48163 return true 48164 } 48165 // match: (If (SETGE cmp) yes no) 48166 // cond: 48167 // result: (GE cmp yes no) 48168 for { 48169 v := b.Control 48170 if v.Op != OpAMD64SETGE { 48171 break 48172 } 48173 cmp := v.Args[0] 48174 b.Kind = BlockAMD64GE 48175 b.SetControl(cmp) 48176 b.Aux = nil 48177 return true 48178 } 48179 // match: (If (SETEQ cmp) yes no) 48180 // cond: 48181 // result: (EQ cmp yes no) 48182 for { 48183 v := b.Control 48184 if v.Op != OpAMD64SETEQ { 48185 break 48186 } 48187 cmp 
:= v.Args[0] 48188 b.Kind = BlockAMD64EQ 48189 b.SetControl(cmp) 48190 b.Aux = nil 48191 return true 48192 } 48193 // match: (If (SETNE cmp) yes no) 48194 // cond: 48195 // result: (NE cmp yes no) 48196 for { 48197 v := b.Control 48198 if v.Op != OpAMD64SETNE { 48199 break 48200 } 48201 cmp := v.Args[0] 48202 b.Kind = BlockAMD64NE 48203 b.SetControl(cmp) 48204 b.Aux = nil 48205 return true 48206 } 48207 // match: (If (SETB cmp) yes no) 48208 // cond: 48209 // result: (ULT cmp yes no) 48210 for { 48211 v := b.Control 48212 if v.Op != OpAMD64SETB { 48213 break 48214 } 48215 cmp := v.Args[0] 48216 b.Kind = BlockAMD64ULT 48217 b.SetControl(cmp) 48218 b.Aux = nil 48219 return true 48220 } 48221 // match: (If (SETBE cmp) yes no) 48222 // cond: 48223 // result: (ULE cmp yes no) 48224 for { 48225 v := b.Control 48226 if v.Op != OpAMD64SETBE { 48227 break 48228 } 48229 cmp := v.Args[0] 48230 b.Kind = BlockAMD64ULE 48231 b.SetControl(cmp) 48232 b.Aux = nil 48233 return true 48234 } 48235 // match: (If (SETA cmp) yes no) 48236 // cond: 48237 // result: (UGT cmp yes no) 48238 for { 48239 v := b.Control 48240 if v.Op != OpAMD64SETA { 48241 break 48242 } 48243 cmp := v.Args[0] 48244 b.Kind = BlockAMD64UGT 48245 b.SetControl(cmp) 48246 b.Aux = nil 48247 return true 48248 } 48249 // match: (If (SETAE cmp) yes no) 48250 // cond: 48251 // result: (UGE cmp yes no) 48252 for { 48253 v := b.Control 48254 if v.Op != OpAMD64SETAE { 48255 break 48256 } 48257 cmp := v.Args[0] 48258 b.Kind = BlockAMD64UGE 48259 b.SetControl(cmp) 48260 b.Aux = nil 48261 return true 48262 } 48263 // match: (If (SETGF cmp) yes no) 48264 // cond: 48265 // result: (UGT cmp yes no) 48266 for { 48267 v := b.Control 48268 if v.Op != OpAMD64SETGF { 48269 break 48270 } 48271 cmp := v.Args[0] 48272 b.Kind = BlockAMD64UGT 48273 b.SetControl(cmp) 48274 b.Aux = nil 48275 return true 48276 } 48277 // match: (If (SETGEF cmp) yes no) 48278 // cond: 48279 // result: (UGE cmp yes no) 48280 for { 48281 v := b.Control 48282 if v.Op != OpAMD64SETGEF { 48283 break 48284 } 48285 cmp := v.Args[0] 48286 b.Kind = BlockAMD64UGE 48287 b.SetControl(cmp) 48288 b.Aux = nil 48289 return true 48290 } 48291 // match: (If (SETEQF cmp) yes no) 48292 // cond: 48293 // result: (EQF cmp yes no) 48294 for { 48295 v := b.Control 48296 if v.Op != OpAMD64SETEQF { 48297 break 48298 } 48299 cmp := v.Args[0] 48300 b.Kind = BlockAMD64EQF 48301 b.SetControl(cmp) 48302 b.Aux = nil 48303 return true 48304 } 48305 // match: (If (SETNEF cmp) yes no) 48306 // cond: 48307 // result: (NEF cmp yes no) 48308 for { 48309 v := b.Control 48310 if v.Op != OpAMD64SETNEF { 48311 break 48312 } 48313 cmp := v.Args[0] 48314 b.Kind = BlockAMD64NEF 48315 b.SetControl(cmp) 48316 b.Aux = nil 48317 return true 48318 } 48319 // match: (If cond yes no) 48320 // cond: 48321 // result: (NE (TESTB cond cond) yes no) 48322 for { 48323 v := b.Control 48324 _ = v 48325 cond := b.Control 48326 b.Kind = BlockAMD64NE 48327 v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags) 48328 v0.AddArg(cond) 48329 v0.AddArg(cond) 48330 b.SetControl(v0) 48331 b.Aux = nil 48332 return true 48333 } 48334 case BlockAMD64LE: 48335 // match: (LE (InvertFlags cmp) yes no) 48336 // cond: 48337 // result: (GE cmp yes no) 48338 for { 48339 v := b.Control 48340 if v.Op != OpAMD64InvertFlags { 48341 break 48342 } 48343 cmp := v.Args[0] 48344 b.Kind = BlockAMD64GE 48345 b.SetControl(cmp) 48346 b.Aux = nil 48347 return true 48348 } 48349 // match: (LE (FlagEQ) yes no) 48350 // cond: 48351 // result: (First nil yes no) 48352 for { 48353 
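// The Flag* control ops (FlagEQ, FlagLT_ULT, FlagGT_UGT, ...) record
// comparison results that became known at compile time, so the Flag*
// rules in this switch fold the conditional block away entirely: the
// block becomes BlockFirst, which unconditionally takes its first
// successor, and swapSuccessors runs first whenever the known result
// makes the original condition false.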
v := b.Control 48354 if v.Op != OpAMD64FlagEQ { 48355 break 48356 } 48357 b.Kind = BlockFirst 48358 b.SetControl(nil) 48359 b.Aux = nil 48360 return true 48361 } 48362 // match: (LE (FlagLT_ULT) yes no) 48363 // cond: 48364 // result: (First nil yes no) 48365 for { 48366 v := b.Control 48367 if v.Op != OpAMD64FlagLT_ULT { 48368 break 48369 } 48370 b.Kind = BlockFirst 48371 b.SetControl(nil) 48372 b.Aux = nil 48373 return true 48374 } 48375 // match: (LE (FlagLT_UGT) yes no) 48376 // cond: 48377 // result: (First nil yes no) 48378 for { 48379 v := b.Control 48380 if v.Op != OpAMD64FlagLT_UGT { 48381 break 48382 } 48383 b.Kind = BlockFirst 48384 b.SetControl(nil) 48385 b.Aux = nil 48386 return true 48387 } 48388 // match: (LE (FlagGT_ULT) yes no) 48389 // cond: 48390 // result: (First nil no yes) 48391 for { 48392 v := b.Control 48393 if v.Op != OpAMD64FlagGT_ULT { 48394 break 48395 } 48396 b.Kind = BlockFirst 48397 b.SetControl(nil) 48398 b.Aux = nil 48399 b.swapSuccessors() 48400 return true 48401 } 48402 // match: (LE (FlagGT_UGT) yes no) 48403 // cond: 48404 // result: (First nil no yes) 48405 for { 48406 v := b.Control 48407 if v.Op != OpAMD64FlagGT_UGT { 48408 break 48409 } 48410 b.Kind = BlockFirst 48411 b.SetControl(nil) 48412 b.Aux = nil 48413 b.swapSuccessors() 48414 return true 48415 } 48416 case BlockAMD64LT: 48417 // match: (LT (InvertFlags cmp) yes no) 48418 // cond: 48419 // result: (GT cmp yes no) 48420 for { 48421 v := b.Control 48422 if v.Op != OpAMD64InvertFlags { 48423 break 48424 } 48425 cmp := v.Args[0] 48426 b.Kind = BlockAMD64GT 48427 b.SetControl(cmp) 48428 b.Aux = nil 48429 return true 48430 } 48431 // match: (LT (FlagEQ) yes no) 48432 // cond: 48433 // result: (First nil no yes) 48434 for { 48435 v := b.Control 48436 if v.Op != OpAMD64FlagEQ { 48437 break 48438 } 48439 b.Kind = BlockFirst 48440 b.SetControl(nil) 48441 b.Aux = nil 48442 b.swapSuccessors() 48443 return true 48444 } 48445 // match: (LT (FlagLT_ULT) yes no) 48446 // cond: 48447 // result: (First nil yes no) 48448 for { 48449 v := b.Control 48450 if v.Op != OpAMD64FlagLT_ULT { 48451 break 48452 } 48453 b.Kind = BlockFirst 48454 b.SetControl(nil) 48455 b.Aux = nil 48456 return true 48457 } 48458 // match: (LT (FlagLT_UGT) yes no) 48459 // cond: 48460 // result: (First nil yes no) 48461 for { 48462 v := b.Control 48463 if v.Op != OpAMD64FlagLT_UGT { 48464 break 48465 } 48466 b.Kind = BlockFirst 48467 b.SetControl(nil) 48468 b.Aux = nil 48469 return true 48470 } 48471 // match: (LT (FlagGT_ULT) yes no) 48472 // cond: 48473 // result: (First nil no yes) 48474 for { 48475 v := b.Control 48476 if v.Op != OpAMD64FlagGT_ULT { 48477 break 48478 } 48479 b.Kind = BlockFirst 48480 b.SetControl(nil) 48481 b.Aux = nil 48482 b.swapSuccessors() 48483 return true 48484 } 48485 // match: (LT (FlagGT_UGT) yes no) 48486 // cond: 48487 // result: (First nil no yes) 48488 for { 48489 v := b.Control 48490 if v.Op != OpAMD64FlagGT_UGT { 48491 break 48492 } 48493 b.Kind = BlockFirst 48494 b.SetControl(nil) 48495 b.Aux = nil 48496 b.swapSuccessors() 48497 return true 48498 } 48499 case BlockAMD64NE: 48500 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 48501 // cond: 48502 // result: (LT cmp yes no) 48503 for { 48504 v := b.Control 48505 if v.Op != OpAMD64TESTB { 48506 break 48507 } 48508 _ = v.Args[1] 48509 v_0 := v.Args[0] 48510 if v_0.Op != OpAMD64SETL { 48511 break 48512 } 48513 cmp := v_0.Args[0] 48514 v_1 := v.Args[1] 48515 if v_1.Op != OpAMD64SETL { 48516 break 48517 } 48518 if cmp != v_1.Args[0] { 48519 break 48520 } 
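// (TESTB (SETcc cmp) (SETcc cmp)) is the shape left behind by the
// generic If lowering above, which emits (NE (TESTB cond cond)) for a
// boolean condition. Once the matcher has confirmed both TESTB operands
// are the same SETcc of the same flags, materializing the byte is dead
// weight and the block can branch on the original comparison directly.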
48521 b.Kind = BlockAMD64LT 48522 b.SetControl(cmp) 48523 b.Aux = nil 48524 return true 48525 } 48526 // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) 48527 // cond: 48528 // result: (LT cmp yes no) 48529 for { 48530 v := b.Control 48531 if v.Op != OpAMD64TESTB { 48532 break 48533 } 48534 _ = v.Args[1] 48535 v_0 := v.Args[0] 48536 if v_0.Op != OpAMD64SETL { 48537 break 48538 } 48539 cmp := v_0.Args[0] 48540 v_1 := v.Args[1] 48541 if v_1.Op != OpAMD64SETL { 48542 break 48543 } 48544 if cmp != v_1.Args[0] { 48545 break 48546 } 48547 b.Kind = BlockAMD64LT 48548 b.SetControl(cmp) 48549 b.Aux = nil 48550 return true 48551 } 48552 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 48553 // cond: 48554 // result: (LE cmp yes no) 48555 for { 48556 v := b.Control 48557 if v.Op != OpAMD64TESTB { 48558 break 48559 } 48560 _ = v.Args[1] 48561 v_0 := v.Args[0] 48562 if v_0.Op != OpAMD64SETLE { 48563 break 48564 } 48565 cmp := v_0.Args[0] 48566 v_1 := v.Args[1] 48567 if v_1.Op != OpAMD64SETLE { 48568 break 48569 } 48570 if cmp != v_1.Args[0] { 48571 break 48572 } 48573 b.Kind = BlockAMD64LE 48574 b.SetControl(cmp) 48575 b.Aux = nil 48576 return true 48577 } 48578 // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) 48579 // cond: 48580 // result: (LE cmp yes no) 48581 for { 48582 v := b.Control 48583 if v.Op != OpAMD64TESTB { 48584 break 48585 } 48586 _ = v.Args[1] 48587 v_0 := v.Args[0] 48588 if v_0.Op != OpAMD64SETLE { 48589 break 48590 } 48591 cmp := v_0.Args[0] 48592 v_1 := v.Args[1] 48593 if v_1.Op != OpAMD64SETLE { 48594 break 48595 } 48596 if cmp != v_1.Args[0] { 48597 break 48598 } 48599 b.Kind = BlockAMD64LE 48600 b.SetControl(cmp) 48601 b.Aux = nil 48602 return true 48603 } 48604 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 48605 // cond: 48606 // result: (GT cmp yes no) 48607 for { 48608 v := b.Control 48609 if v.Op != OpAMD64TESTB { 48610 break 48611 } 48612 _ = v.Args[1] 48613 v_0 := v.Args[0] 48614 if v_0.Op != OpAMD64SETG { 48615 break 48616 } 48617 cmp := v_0.Args[0] 48618 v_1 := v.Args[1] 48619 if v_1.Op != OpAMD64SETG { 48620 break 48621 } 48622 if cmp != v_1.Args[0] { 48623 break 48624 } 48625 b.Kind = BlockAMD64GT 48626 b.SetControl(cmp) 48627 b.Aux = nil 48628 return true 48629 } 48630 // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) 48631 // cond: 48632 // result: (GT cmp yes no) 48633 for { 48634 v := b.Control 48635 if v.Op != OpAMD64TESTB { 48636 break 48637 } 48638 _ = v.Args[1] 48639 v_0 := v.Args[0] 48640 if v_0.Op != OpAMD64SETG { 48641 break 48642 } 48643 cmp := v_0.Args[0] 48644 v_1 := v.Args[1] 48645 if v_1.Op != OpAMD64SETG { 48646 break 48647 } 48648 if cmp != v_1.Args[0] { 48649 break 48650 } 48651 b.Kind = BlockAMD64GT 48652 b.SetControl(cmp) 48653 b.Aux = nil 48654 return true 48655 } 48656 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 48657 // cond: 48658 // result: (GE cmp yes no) 48659 for { 48660 v := b.Control 48661 if v.Op != OpAMD64TESTB { 48662 break 48663 } 48664 _ = v.Args[1] 48665 v_0 := v.Args[0] 48666 if v_0.Op != OpAMD64SETGE { 48667 break 48668 } 48669 cmp := v_0.Args[0] 48670 v_1 := v.Args[1] 48671 if v_1.Op != OpAMD64SETGE { 48672 break 48673 } 48674 if cmp != v_1.Args[0] { 48675 break 48676 } 48677 b.Kind = BlockAMD64GE 48678 b.SetControl(cmp) 48679 b.Aux = nil 48680 return true 48681 } 48682 // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) 48683 // cond: 48684 // result: (GE cmp yes no) 48685 for { 48686 v := b.Control 48687 if v.Op != OpAMD64TESTB { 48688 break 48689 } 48690 _ = v.Args[1] 48691 v_0 := v.Args[0] 48692 
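// Each of these TESTB rules appears twice in a row, apparently because
// the rule generator also emits the commuted form of commutative ops
// such as TESTB; with both operands being the same SETcc pattern, the
// commuted matcher comes out textually identical. The duplicate is
// harmless: whichever copy runs first matches.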
if v_0.Op != OpAMD64SETGE { 48693 break 48694 } 48695 cmp := v_0.Args[0] 48696 v_1 := v.Args[1] 48697 if v_1.Op != OpAMD64SETGE { 48698 break 48699 } 48700 if cmp != v_1.Args[0] { 48701 break 48702 } 48703 b.Kind = BlockAMD64GE 48704 b.SetControl(cmp) 48705 b.Aux = nil 48706 return true 48707 } 48708 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 48709 // cond: 48710 // result: (EQ cmp yes no) 48711 for { 48712 v := b.Control 48713 if v.Op != OpAMD64TESTB { 48714 break 48715 } 48716 _ = v.Args[1] 48717 v_0 := v.Args[0] 48718 if v_0.Op != OpAMD64SETEQ { 48719 break 48720 } 48721 cmp := v_0.Args[0] 48722 v_1 := v.Args[1] 48723 if v_1.Op != OpAMD64SETEQ { 48724 break 48725 } 48726 if cmp != v_1.Args[0] { 48727 break 48728 } 48729 b.Kind = BlockAMD64EQ 48730 b.SetControl(cmp) 48731 b.Aux = nil 48732 return true 48733 } 48734 // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) 48735 // cond: 48736 // result: (EQ cmp yes no) 48737 for { 48738 v := b.Control 48739 if v.Op != OpAMD64TESTB { 48740 break 48741 } 48742 _ = v.Args[1] 48743 v_0 := v.Args[0] 48744 if v_0.Op != OpAMD64SETEQ { 48745 break 48746 } 48747 cmp := v_0.Args[0] 48748 v_1 := v.Args[1] 48749 if v_1.Op != OpAMD64SETEQ { 48750 break 48751 } 48752 if cmp != v_1.Args[0] { 48753 break 48754 } 48755 b.Kind = BlockAMD64EQ 48756 b.SetControl(cmp) 48757 b.Aux = nil 48758 return true 48759 } 48760 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 48761 // cond: 48762 // result: (NE cmp yes no) 48763 for { 48764 v := b.Control 48765 if v.Op != OpAMD64TESTB { 48766 break 48767 } 48768 _ = v.Args[1] 48769 v_0 := v.Args[0] 48770 if v_0.Op != OpAMD64SETNE { 48771 break 48772 } 48773 cmp := v_0.Args[0] 48774 v_1 := v.Args[1] 48775 if v_1.Op != OpAMD64SETNE { 48776 break 48777 } 48778 if cmp != v_1.Args[0] { 48779 break 48780 } 48781 b.Kind = BlockAMD64NE 48782 b.SetControl(cmp) 48783 b.Aux = nil 48784 return true 48785 } 48786 // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) 48787 // cond: 48788 // result: (NE cmp yes no) 48789 for { 48790 v := b.Control 48791 if v.Op != OpAMD64TESTB { 48792 break 48793 } 48794 _ = v.Args[1] 48795 v_0 := v.Args[0] 48796 if v_0.Op != OpAMD64SETNE { 48797 break 48798 } 48799 cmp := v_0.Args[0] 48800 v_1 := v.Args[1] 48801 if v_1.Op != OpAMD64SETNE { 48802 break 48803 } 48804 if cmp != v_1.Args[0] { 48805 break 48806 } 48807 b.Kind = BlockAMD64NE 48808 b.SetControl(cmp) 48809 b.Aux = nil 48810 return true 48811 } 48812 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 48813 // cond: 48814 // result: (ULT cmp yes no) 48815 for { 48816 v := b.Control 48817 if v.Op != OpAMD64TESTB { 48818 break 48819 } 48820 _ = v.Args[1] 48821 v_0 := v.Args[0] 48822 if v_0.Op != OpAMD64SETB { 48823 break 48824 } 48825 cmp := v_0.Args[0] 48826 v_1 := v.Args[1] 48827 if v_1.Op != OpAMD64SETB { 48828 break 48829 } 48830 if cmp != v_1.Args[0] { 48831 break 48832 } 48833 b.Kind = BlockAMD64ULT 48834 b.SetControl(cmp) 48835 b.Aux = nil 48836 return true 48837 } 48838 // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) 48839 // cond: 48840 // result: (ULT cmp yes no) 48841 for { 48842 v := b.Control 48843 if v.Op != OpAMD64TESTB { 48844 break 48845 } 48846 _ = v.Args[1] 48847 v_0 := v.Args[0] 48848 if v_0.Op != OpAMD64SETB { 48849 break 48850 } 48851 cmp := v_0.Args[0] 48852 v_1 := v.Args[1] 48853 if v_1.Op != OpAMD64SETB { 48854 break 48855 } 48856 if cmp != v_1.Args[0] { 48857 break 48858 } 48859 b.Kind = BlockAMD64ULT 48860 b.SetControl(cmp) 48861 b.Aux = nil 48862 return true 48863 } 48864 // match: (NE (TESTB (SETBE 
cmp) (SETBE cmp)) yes no) 48865 // cond: 48866 // result: (ULE cmp yes no) 48867 for { 48868 v := b.Control 48869 if v.Op != OpAMD64TESTB { 48870 break 48871 } 48872 _ = v.Args[1] 48873 v_0 := v.Args[0] 48874 if v_0.Op != OpAMD64SETBE { 48875 break 48876 } 48877 cmp := v_0.Args[0] 48878 v_1 := v.Args[1] 48879 if v_1.Op != OpAMD64SETBE { 48880 break 48881 } 48882 if cmp != v_1.Args[0] { 48883 break 48884 } 48885 b.Kind = BlockAMD64ULE 48886 b.SetControl(cmp) 48887 b.Aux = nil 48888 return true 48889 } 48890 // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) 48891 // cond: 48892 // result: (ULE cmp yes no) 48893 for { 48894 v := b.Control 48895 if v.Op != OpAMD64TESTB { 48896 break 48897 } 48898 _ = v.Args[1] 48899 v_0 := v.Args[0] 48900 if v_0.Op != OpAMD64SETBE { 48901 break 48902 } 48903 cmp := v_0.Args[0] 48904 v_1 := v.Args[1] 48905 if v_1.Op != OpAMD64SETBE { 48906 break 48907 } 48908 if cmp != v_1.Args[0] { 48909 break 48910 } 48911 b.Kind = BlockAMD64ULE 48912 b.SetControl(cmp) 48913 b.Aux = nil 48914 return true 48915 } 48916 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 48917 // cond: 48918 // result: (UGT cmp yes no) 48919 for { 48920 v := b.Control 48921 if v.Op != OpAMD64TESTB { 48922 break 48923 } 48924 _ = v.Args[1] 48925 v_0 := v.Args[0] 48926 if v_0.Op != OpAMD64SETA { 48927 break 48928 } 48929 cmp := v_0.Args[0] 48930 v_1 := v.Args[1] 48931 if v_1.Op != OpAMD64SETA { 48932 break 48933 } 48934 if cmp != v_1.Args[0] { 48935 break 48936 } 48937 b.Kind = BlockAMD64UGT 48938 b.SetControl(cmp) 48939 b.Aux = nil 48940 return true 48941 } 48942 // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) 48943 // cond: 48944 // result: (UGT cmp yes no) 48945 for { 48946 v := b.Control 48947 if v.Op != OpAMD64TESTB { 48948 break 48949 } 48950 _ = v.Args[1] 48951 v_0 := v.Args[0] 48952 if v_0.Op != OpAMD64SETA { 48953 break 48954 } 48955 cmp := v_0.Args[0] 48956 v_1 := v.Args[1] 48957 if v_1.Op != OpAMD64SETA { 48958 break 48959 } 48960 if cmp != v_1.Args[0] { 48961 break 48962 } 48963 b.Kind = BlockAMD64UGT 48964 b.SetControl(cmp) 48965 b.Aux = nil 48966 return true 48967 } 48968 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 48969 // cond: 48970 // result: (UGE cmp yes no) 48971 for { 48972 v := b.Control 48973 if v.Op != OpAMD64TESTB { 48974 break 48975 } 48976 _ = v.Args[1] 48977 v_0 := v.Args[0] 48978 if v_0.Op != OpAMD64SETAE { 48979 break 48980 } 48981 cmp := v_0.Args[0] 48982 v_1 := v.Args[1] 48983 if v_1.Op != OpAMD64SETAE { 48984 break 48985 } 48986 if cmp != v_1.Args[0] { 48987 break 48988 } 48989 b.Kind = BlockAMD64UGE 48990 b.SetControl(cmp) 48991 b.Aux = nil 48992 return true 48993 } 48994 // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) 48995 // cond: 48996 // result: (UGE cmp yes no) 48997 for { 48998 v := b.Control 48999 if v.Op != OpAMD64TESTB { 49000 break 49001 } 49002 _ = v.Args[1] 49003 v_0 := v.Args[0] 49004 if v_0.Op != OpAMD64SETAE { 49005 break 49006 } 49007 cmp := v_0.Args[0] 49008 v_1 := v.Args[1] 49009 if v_1.Op != OpAMD64SETAE { 49010 break 49011 } 49012 if cmp != v_1.Args[0] { 49013 break 49014 } 49015 b.Kind = BlockAMD64UGE 49016 b.SetControl(cmp) 49017 b.Aux = nil 49018 return true 49019 } 49020 // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) 49021 // cond: !config.nacl 49022 // result: (ULT (BTL x y)) 49023 for { 49024 v := b.Control 49025 if v.Op != OpAMD64TESTL { 49026 break 49027 } 49028 _ = v.Args[1] 49029 v_0 := v.Args[0] 49030 if v_0.Op != OpAMD64SHLL { 49031 break 49032 } 49033 _ = v_0.Args[1] 49034 v_0_0 := v_0.Args[0] 49035 if 
v_0_0.Op != OpAMD64MOVLconst { 49036 break 49037 } 49038 if v_0_0.AuxInt != 1 { 49039 break 49040 } 49041 x := v_0.Args[1] 49042 y := v.Args[1] 49043 if !(!config.nacl) { 49044 break 49045 } 49046 b.Kind = BlockAMD64ULT 49047 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 49048 v0.AddArg(x) 49049 v0.AddArg(y) 49050 b.SetControl(v0) 49051 b.Aux = nil 49052 return true 49053 } 49054 // match: (NE (TESTL y (SHLL (MOVLconst [1]) x))) 49055 // cond: !config.nacl 49056 // result: (ULT (BTL x y)) 49057 for { 49058 v := b.Control 49059 if v.Op != OpAMD64TESTL { 49060 break 49061 } 49062 _ = v.Args[1] 49063 y := v.Args[0] 49064 v_1 := v.Args[1] 49065 if v_1.Op != OpAMD64SHLL { 49066 break 49067 } 49068 _ = v_1.Args[1] 49069 v_1_0 := v_1.Args[0] 49070 if v_1_0.Op != OpAMD64MOVLconst { 49071 break 49072 } 49073 if v_1_0.AuxInt != 1 { 49074 break 49075 } 49076 x := v_1.Args[1] 49077 if !(!config.nacl) { 49078 break 49079 } 49080 b.Kind = BlockAMD64ULT 49081 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) 49082 v0.AddArg(x) 49083 v0.AddArg(y) 49084 b.SetControl(v0) 49085 b.Aux = nil 49086 return true 49087 } 49088 // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) 49089 // cond: !config.nacl 49090 // result: (ULT (BTQ x y)) 49091 for { 49092 v := b.Control 49093 if v.Op != OpAMD64TESTQ { 49094 break 49095 } 49096 _ = v.Args[1] 49097 v_0 := v.Args[0] 49098 if v_0.Op != OpAMD64SHLQ { 49099 break 49100 } 49101 _ = v_0.Args[1] 49102 v_0_0 := v_0.Args[0] 49103 if v_0_0.Op != OpAMD64MOVQconst { 49104 break 49105 } 49106 if v_0_0.AuxInt != 1 { 49107 break 49108 } 49109 x := v_0.Args[1] 49110 y := v.Args[1] 49111 if !(!config.nacl) { 49112 break 49113 } 49114 b.Kind = BlockAMD64ULT 49115 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 49116 v0.AddArg(x) 49117 v0.AddArg(y) 49118 b.SetControl(v0) 49119 b.Aux = nil 49120 return true 49121 } 49122 // match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x))) 49123 // cond: !config.nacl 49124 // result: (ULT (BTQ x y)) 49125 for { 49126 v := b.Control 49127 if v.Op != OpAMD64TESTQ { 49128 break 49129 } 49130 _ = v.Args[1] 49131 y := v.Args[0] 49132 v_1 := v.Args[1] 49133 if v_1.Op != OpAMD64SHLQ { 49134 break 49135 } 49136 _ = v_1.Args[1] 49137 v_1_0 := v_1.Args[0] 49138 if v_1_0.Op != OpAMD64MOVQconst { 49139 break 49140 } 49141 if v_1_0.AuxInt != 1 { 49142 break 49143 } 49144 x := v_1.Args[1] 49145 if !(!config.nacl) { 49146 break 49147 } 49148 b.Kind = BlockAMD64ULT 49149 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) 49150 v0.AddArg(x) 49151 v0.AddArg(y) 49152 b.SetControl(v0) 49153 b.Aux = nil 49154 return true 49155 } 49156 // match: (NE (TESTLconst [c] x)) 49157 // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl 49158 // result: (ULT (BTLconst [log2(c)] x)) 49159 for { 49160 v := b.Control 49161 if v.Op != OpAMD64TESTLconst { 49162 break 49163 } 49164 c := v.AuxInt 49165 x := v.Args[0] 49166 if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { 49167 break 49168 } 49169 b.Kind = BlockAMD64ULT 49170 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) 49171 v0.AuxInt = log2(c) 49172 v0.AddArg(x) 49173 b.SetControl(v0) 49174 b.Aux = nil 49175 return true 49176 } 49177 // match: (NE (TESTQconst [c] x)) 49178 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 49179 // result: (ULT (BTQconst [log2(c)] x)) 49180 for { 49181 v := b.Control 49182 if v.Op != OpAMD64TESTQconst { 49183 break 49184 } 49185 c := v.AuxInt 49186 x := v.Args[0] 49187 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 49188 break 49189 } 49190 b.Kind = 
BlockAMD64ULT 49191 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49192 v0.AuxInt = log2(c) 49193 v0.AddArg(x) 49194 b.SetControl(v0) 49195 b.Aux = nil 49196 return true 49197 } 49198 // match: (NE (TESTQ (MOVQconst [c]) x)) 49199 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 49200 // result: (ULT (BTQconst [log2(c)] x)) 49201 for { 49202 v := b.Control 49203 if v.Op != OpAMD64TESTQ { 49204 break 49205 } 49206 _ = v.Args[1] 49207 v_0 := v.Args[0] 49208 if v_0.Op != OpAMD64MOVQconst { 49209 break 49210 } 49211 c := v_0.AuxInt 49212 x := v.Args[1] 49213 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 49214 break 49215 } 49216 b.Kind = BlockAMD64ULT 49217 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49218 v0.AuxInt = log2(c) 49219 v0.AddArg(x) 49220 b.SetControl(v0) 49221 b.Aux = nil 49222 return true 49223 } 49224 // match: (NE (TESTQ x (MOVQconst [c]))) 49225 // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl 49226 // result: (ULT (BTQconst [log2(c)] x)) 49227 for { 49228 v := b.Control 49229 if v.Op != OpAMD64TESTQ { 49230 break 49231 } 49232 _ = v.Args[1] 49233 x := v.Args[0] 49234 v_1 := v.Args[1] 49235 if v_1.Op != OpAMD64MOVQconst { 49236 break 49237 } 49238 c := v_1.AuxInt 49239 if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { 49240 break 49241 } 49242 b.Kind = BlockAMD64ULT 49243 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) 49244 v0.AuxInt = log2(c) 49245 v0.AddArg(x) 49246 b.SetControl(v0) 49247 b.Aux = nil 49248 return true 49249 } 49250 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 49251 // cond: 49252 // result: (UGT cmp yes no) 49253 for { 49254 v := b.Control 49255 if v.Op != OpAMD64TESTB { 49256 break 49257 } 49258 _ = v.Args[1] 49259 v_0 := v.Args[0] 49260 if v_0.Op != OpAMD64SETGF { 49261 break 49262 } 49263 cmp := v_0.Args[0] 49264 v_1 := v.Args[1] 49265 if v_1.Op != OpAMD64SETGF { 49266 break 49267 } 49268 if cmp != v_1.Args[0] { 49269 break 49270 } 49271 b.Kind = BlockAMD64UGT 49272 b.SetControl(cmp) 49273 b.Aux = nil 49274 return true 49275 } 49276 // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) 49277 // cond: 49278 // result: (UGT cmp yes no) 49279 for { 49280 v := b.Control 49281 if v.Op != OpAMD64TESTB { 49282 break 49283 } 49284 _ = v.Args[1] 49285 v_0 := v.Args[0] 49286 if v_0.Op != OpAMD64SETGF { 49287 break 49288 } 49289 cmp := v_0.Args[0] 49290 v_1 := v.Args[1] 49291 if v_1.Op != OpAMD64SETGF { 49292 break 49293 } 49294 if cmp != v_1.Args[0] { 49295 break 49296 } 49297 b.Kind = BlockAMD64UGT 49298 b.SetControl(cmp) 49299 b.Aux = nil 49300 return true 49301 } 49302 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 49303 // cond: 49304 // result: (UGE cmp yes no) 49305 for { 49306 v := b.Control 49307 if v.Op != OpAMD64TESTB { 49308 break 49309 } 49310 _ = v.Args[1] 49311 v_0 := v.Args[0] 49312 if v_0.Op != OpAMD64SETGEF { 49313 break 49314 } 49315 cmp := v_0.Args[0] 49316 v_1 := v.Args[1] 49317 if v_1.Op != OpAMD64SETGEF { 49318 break 49319 } 49320 if cmp != v_1.Args[0] { 49321 break 49322 } 49323 b.Kind = BlockAMD64UGE 49324 b.SetControl(cmp) 49325 b.Aux = nil 49326 return true 49327 } 49328 // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 49329 // cond: 49330 // result: (UGE cmp yes no) 49331 for { 49332 v := b.Control 49333 if v.Op != OpAMD64TESTB { 49334 break 49335 } 49336 _ = v.Args[1] 49337 v_0 := v.Args[0] 49338 if v_0.Op != OpAMD64SETGEF { 49339 break 49340 } 49341 cmp := v_0.Args[0] 49342 v_1 := v.Args[1] 49343 if v_1.Op != OpAMD64SETGEF { 49344 break 
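// The *F rules (SETGF, SETGEF, SETEQF, SETNEF) branch on floating-point
// comparisons. UCOMISD/UCOMISS report their result in CF/ZF/PF rather
// than the signed flags, so "greater or equal" folds to the unsigned
// UGE block, while equality needs the dedicated EQF/NEF block kinds,
// which also account for the parity flag set by unordered (NaN)
// operands.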
49345 } 49346 if cmp != v_1.Args[0] { 49347 break 49348 } 49349 b.Kind = BlockAMD64UGE 49350 b.SetControl(cmp) 49351 b.Aux = nil 49352 return true 49353 } 49354 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 49355 // cond: 49356 // result: (EQF cmp yes no) 49357 for { 49358 v := b.Control 49359 if v.Op != OpAMD64TESTB { 49360 break 49361 } 49362 _ = v.Args[1] 49363 v_0 := v.Args[0] 49364 if v_0.Op != OpAMD64SETEQF { 49365 break 49366 } 49367 cmp := v_0.Args[0] 49368 v_1 := v.Args[1] 49369 if v_1.Op != OpAMD64SETEQF { 49370 break 49371 } 49372 if cmp != v_1.Args[0] { 49373 break 49374 } 49375 b.Kind = BlockAMD64EQF 49376 b.SetControl(cmp) 49377 b.Aux = nil 49378 return true 49379 } 49380 // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) 49381 // cond: 49382 // result: (EQF cmp yes no) 49383 for { 49384 v := b.Control 49385 if v.Op != OpAMD64TESTB { 49386 break 49387 } 49388 _ = v.Args[1] 49389 v_0 := v.Args[0] 49390 if v_0.Op != OpAMD64SETEQF { 49391 break 49392 } 49393 cmp := v_0.Args[0] 49394 v_1 := v.Args[1] 49395 if v_1.Op != OpAMD64SETEQF { 49396 break 49397 } 49398 if cmp != v_1.Args[0] { 49399 break 49400 } 49401 b.Kind = BlockAMD64EQF 49402 b.SetControl(cmp) 49403 b.Aux = nil 49404 return true 49405 } 49406 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 49407 // cond: 49408 // result: (NEF cmp yes no) 49409 for { 49410 v := b.Control 49411 if v.Op != OpAMD64TESTB { 49412 break 49413 } 49414 _ = v.Args[1] 49415 v_0 := v.Args[0] 49416 if v_0.Op != OpAMD64SETNEF { 49417 break 49418 } 49419 cmp := v_0.Args[0] 49420 v_1 := v.Args[1] 49421 if v_1.Op != OpAMD64SETNEF { 49422 break 49423 } 49424 if cmp != v_1.Args[0] { 49425 break 49426 } 49427 b.Kind = BlockAMD64NEF 49428 b.SetControl(cmp) 49429 b.Aux = nil 49430 return true 49431 } 49432 // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) 49433 // cond: 49434 // result: (NEF cmp yes no) 49435 for { 49436 v := b.Control 49437 if v.Op != OpAMD64TESTB { 49438 break 49439 } 49440 _ = v.Args[1] 49441 v_0 := v.Args[0] 49442 if v_0.Op != OpAMD64SETNEF { 49443 break 49444 } 49445 cmp := v_0.Args[0] 49446 v_1 := v.Args[1] 49447 if v_1.Op != OpAMD64SETNEF { 49448 break 49449 } 49450 if cmp != v_1.Args[0] { 49451 break 49452 } 49453 b.Kind = BlockAMD64NEF 49454 b.SetControl(cmp) 49455 b.Aux = nil 49456 return true 49457 } 49458 // match: (NE (InvertFlags cmp) yes no) 49459 // cond: 49460 // result: (NE cmp yes no) 49461 for { 49462 v := b.Control 49463 if v.Op != OpAMD64InvertFlags { 49464 break 49465 } 49466 cmp := v.Args[0] 49467 b.Kind = BlockAMD64NE 49468 b.SetControl(cmp) 49469 b.Aux = nil 49470 return true 49471 } 49472 // match: (NE (FlagEQ) yes no) 49473 // cond: 49474 // result: (First nil no yes) 49475 for { 49476 v := b.Control 49477 if v.Op != OpAMD64FlagEQ { 49478 break 49479 } 49480 b.Kind = BlockFirst 49481 b.SetControl(nil) 49482 b.Aux = nil 49483 b.swapSuccessors() 49484 return true 49485 } 49486 // match: (NE (FlagLT_ULT) yes no) 49487 // cond: 49488 // result: (First nil yes no) 49489 for { 49490 v := b.Control 49491 if v.Op != OpAMD64FlagLT_ULT { 49492 break 49493 } 49494 b.Kind = BlockFirst 49495 b.SetControl(nil) 49496 b.Aux = nil 49497 return true 49498 } 49499 // match: (NE (FlagLT_UGT) yes no) 49500 // cond: 49501 // result: (First nil yes no) 49502 for { 49503 v := b.Control 49504 if v.Op != OpAMD64FlagLT_UGT { 49505 break 49506 } 49507 b.Kind = BlockFirst 49508 b.SetControl(nil) 49509 b.Aux = nil 49510 return true 49511 } 49512 // match: (NE (FlagGT_ULT) yes no) 49513 // cond: 49514 // result: 
(First nil yes no) 49515 for { 49516 v := b.Control 49517 if v.Op != OpAMD64FlagGT_ULT { 49518 break 49519 } 49520 b.Kind = BlockFirst 49521 b.SetControl(nil) 49522 b.Aux = nil 49523 return true 49524 } 49525 // match: (NE (FlagGT_UGT) yes no) 49526 // cond: 49527 // result: (First nil yes no) 49528 for { 49529 v := b.Control 49530 if v.Op != OpAMD64FlagGT_UGT { 49531 break 49532 } 49533 b.Kind = BlockFirst 49534 b.SetControl(nil) 49535 b.Aux = nil 49536 return true 49537 } 49538 case BlockAMD64UGE: 49539 // match: (UGE (InvertFlags cmp) yes no) 49540 // cond: 49541 // result: (ULE cmp yes no) 49542 for { 49543 v := b.Control 49544 if v.Op != OpAMD64InvertFlags { 49545 break 49546 } 49547 cmp := v.Args[0] 49548 b.Kind = BlockAMD64ULE 49549 b.SetControl(cmp) 49550 b.Aux = nil 49551 return true 49552 } 49553 // match: (UGE (FlagEQ) yes no) 49554 // cond: 49555 // result: (First nil yes no) 49556 for { 49557 v := b.Control 49558 if v.Op != OpAMD64FlagEQ { 49559 break 49560 } 49561 b.Kind = BlockFirst 49562 b.SetControl(nil) 49563 b.Aux = nil 49564 return true 49565 } 49566 // match: (UGE (FlagLT_ULT) yes no) 49567 // cond: 49568 // result: (First nil no yes) 49569 for { 49570 v := b.Control 49571 if v.Op != OpAMD64FlagLT_ULT { 49572 break 49573 } 49574 b.Kind = BlockFirst 49575 b.SetControl(nil) 49576 b.Aux = nil 49577 b.swapSuccessors() 49578 return true 49579 } 49580 // match: (UGE (FlagLT_UGT) yes no) 49581 // cond: 49582 // result: (First nil yes no) 49583 for { 49584 v := b.Control 49585 if v.Op != OpAMD64FlagLT_UGT { 49586 break 49587 } 49588 b.Kind = BlockFirst 49589 b.SetControl(nil) 49590 b.Aux = nil 49591 return true 49592 } 49593 // match: (UGE (FlagGT_ULT) yes no) 49594 // cond: 49595 // result: (First nil no yes) 49596 for { 49597 v := b.Control 49598 if v.Op != OpAMD64FlagGT_ULT { 49599 break 49600 } 49601 b.Kind = BlockFirst 49602 b.SetControl(nil) 49603 b.Aux = nil 49604 b.swapSuccessors() 49605 return true 49606 } 49607 // match: (UGE (FlagGT_UGT) yes no) 49608 // cond: 49609 // result: (First nil yes no) 49610 for { 49611 v := b.Control 49612 if v.Op != OpAMD64FlagGT_UGT { 49613 break 49614 } 49615 b.Kind = BlockFirst 49616 b.SetControl(nil) 49617 b.Aux = nil 49618 return true 49619 } 49620 case BlockAMD64UGT: 49621 // match: (UGT (InvertFlags cmp) yes no) 49622 // cond: 49623 // result: (ULT cmp yes no) 49624 for { 49625 v := b.Control 49626 if v.Op != OpAMD64InvertFlags { 49627 break 49628 } 49629 cmp := v.Args[0] 49630 b.Kind = BlockAMD64ULT 49631 b.SetControl(cmp) 49632 b.Aux = nil 49633 return true 49634 } 49635 // match: (UGT (FlagEQ) yes no) 49636 // cond: 49637 // result: (First nil no yes) 49638 for { 49639 v := b.Control 49640 if v.Op != OpAMD64FlagEQ { 49641 break 49642 } 49643 b.Kind = BlockFirst 49644 b.SetControl(nil) 49645 b.Aux = nil 49646 b.swapSuccessors() 49647 return true 49648 } 49649 // match: (UGT (FlagLT_ULT) yes no) 49650 // cond: 49651 // result: (First nil no yes) 49652 for { 49653 v := b.Control 49654 if v.Op != OpAMD64FlagLT_ULT { 49655 break 49656 } 49657 b.Kind = BlockFirst 49658 b.SetControl(nil) 49659 b.Aux = nil 49660 b.swapSuccessors() 49661 return true 49662 } 49663 // match: (UGT (FlagLT_UGT) yes no) 49664 // cond: 49665 // result: (First nil yes no) 49666 for { 49667 v := b.Control 49668 if v.Op != OpAMD64FlagLT_UGT { 49669 break 49670 } 49671 b.Kind = BlockFirst 49672 b.SetControl(nil) 49673 b.Aux = nil 49674 return true 49675 } 49676 // match: (UGT (FlagGT_ULT) yes no) 49677 // cond: 49678 // result: (First nil no yes) 49679 for { 
49680 v := b.Control 49681 if v.Op != OpAMD64FlagGT_ULT { 49682 break 49683 } 49684 b.Kind = BlockFirst 49685 b.SetControl(nil) 49686 b.Aux = nil 49687 b.swapSuccessors() 49688 return true 49689 } 49690 // match: (UGT (FlagGT_UGT) yes no) 49691 // cond: 49692 // result: (First nil yes no) 49693 for { 49694 v := b.Control 49695 if v.Op != OpAMD64FlagGT_UGT { 49696 break 49697 } 49698 b.Kind = BlockFirst 49699 b.SetControl(nil) 49700 b.Aux = nil 49701 return true 49702 } 49703 case BlockAMD64ULE: 49704 // match: (ULE (InvertFlags cmp) yes no) 49705 // cond: 49706 // result: (UGE cmp yes no) 49707 for { 49708 v := b.Control 49709 if v.Op != OpAMD64InvertFlags { 49710 break 49711 } 49712 cmp := v.Args[0] 49713 b.Kind = BlockAMD64UGE 49714 b.SetControl(cmp) 49715 b.Aux = nil 49716 return true 49717 } 49718 // match: (ULE (FlagEQ) yes no) 49719 // cond: 49720 // result: (First nil yes no) 49721 for { 49722 v := b.Control 49723 if v.Op != OpAMD64FlagEQ { 49724 break 49725 } 49726 b.Kind = BlockFirst 49727 b.SetControl(nil) 49728 b.Aux = nil 49729 return true 49730 } 49731 // match: (ULE (FlagLT_ULT) yes no) 49732 // cond: 49733 // result: (First nil yes no) 49734 for { 49735 v := b.Control 49736 if v.Op != OpAMD64FlagLT_ULT { 49737 break 49738 } 49739 b.Kind = BlockFirst 49740 b.SetControl(nil) 49741 b.Aux = nil 49742 return true 49743 } 49744 // match: (ULE (FlagLT_UGT) yes no) 49745 // cond: 49746 // result: (First nil no yes) 49747 for { 49748 v := b.Control 49749 if v.Op != OpAMD64FlagLT_UGT { 49750 break 49751 } 49752 b.Kind = BlockFirst 49753 b.SetControl(nil) 49754 b.Aux = nil 49755 b.swapSuccessors() 49756 return true 49757 } 49758 // match: (ULE (FlagGT_ULT) yes no) 49759 // cond: 49760 // result: (First nil yes no) 49761 for { 49762 v := b.Control 49763 if v.Op != OpAMD64FlagGT_ULT { 49764 break 49765 } 49766 b.Kind = BlockFirst 49767 b.SetControl(nil) 49768 b.Aux = nil 49769 return true 49770 } 49771 // match: (ULE (FlagGT_UGT) yes no) 49772 // cond: 49773 // result: (First nil no yes) 49774 for { 49775 v := b.Control 49776 if v.Op != OpAMD64FlagGT_UGT { 49777 break 49778 } 49779 b.Kind = BlockFirst 49780 b.SetControl(nil) 49781 b.Aux = nil 49782 b.swapSuccessors() 49783 return true 49784 } 49785 case BlockAMD64ULT: 49786 // match: (ULT (InvertFlags cmp) yes no) 49787 // cond: 49788 // result: (UGT cmp yes no) 49789 for { 49790 v := b.Control 49791 if v.Op != OpAMD64InvertFlags { 49792 break 49793 } 49794 cmp := v.Args[0] 49795 b.Kind = BlockAMD64UGT 49796 b.SetControl(cmp) 49797 b.Aux = nil 49798 return true 49799 } 49800 // match: (ULT (FlagEQ) yes no) 49801 // cond: 49802 // result: (First nil no yes) 49803 for { 49804 v := b.Control 49805 if v.Op != OpAMD64FlagEQ { 49806 break 49807 } 49808 b.Kind = BlockFirst 49809 b.SetControl(nil) 49810 b.Aux = nil 49811 b.swapSuccessors() 49812 return true 49813 } 49814 // match: (ULT (FlagLT_ULT) yes no) 49815 // cond: 49816 // result: (First nil yes no) 49817 for { 49818 v := b.Control 49819 if v.Op != OpAMD64FlagLT_ULT { 49820 break 49821 } 49822 b.Kind = BlockFirst 49823 b.SetControl(nil) 49824 b.Aux = nil 49825 return true 49826 } 49827 // match: (ULT (FlagLT_UGT) yes no) 49828 // cond: 49829 // result: (First nil no yes) 49830 for { 49831 v := b.Control 49832 if v.Op != OpAMD64FlagLT_UGT { 49833 break 49834 } 49835 b.Kind = BlockFirst 49836 b.SetControl(nil) 49837 b.Aux = nil 49838 b.swapSuccessors() 49839 return true 49840 } 49841 // match: (ULT (FlagGT_ULT) yes no) 49842 // cond: 49843 // result: (First nil yes no) 49844 for { 49845 v 
:= b.Control 49846 if v.Op != OpAMD64FlagGT_ULT { 49847 break 49848 } 49849 b.Kind = BlockFirst 49850 b.SetControl(nil) 49851 b.Aux = nil 49852 return true 49853 } 49854 // match: (ULT (FlagGT_UGT) yes no) 49855 // cond: 49856 // result: (First nil no yes) 49857 for { 49858 v := b.Control 49859 if v.Op != OpAMD64FlagGT_UGT { 49860 break 49861 } 49862 b.Kind = BlockFirst 49863 b.SetControl(nil) 49864 b.Aux = nil 49865 b.swapSuccessors() 49866 return true 49867 } 49868 } 49869 return false 49870 }
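// ---------------------------------------------------------------------
// Editorial sketches (not generated code). The two helpers below use
// made-up names (zeroLoweringSketch, rewriteFuncSketch) and restate, in
// plain Go, how the matchers above behave. They assume the surrounding
// ssa package context (Func, Blocks, Values, rewriteValueAMD64,
// rewriteBlockAMD64) and gloss over AuxInt encoding details such as
// makeValAndOff(0, off), which MOVQstoreconst uses to pack the stored
// constant and the store offset into a single int64.

// zeroLoweringSketch restates the Zero dispatch implemented by
// rewriteValueAMD64_OpZero_10/_20 for sizes of 16 bytes and up (the
// s < 16 rules live in the earlier OpZero matcher and are ignored
// here). It returns a label for the lowering that would fire.
func zeroLoweringSketch(s int64, useSSE, noDuffDevice bool) string {
	switch {
	case !useSSE && (s == 16 || s == 24 || s == 32):
		// One 8-byte MOVQstoreconst per quadword, chained through memory.
		return "MOVQstoreconst chain"
	case useSSE && s > 8 && s < 16:
		// Two possibly overlapping 8-byte stores at offsets 0 and s-8.
		return "two MOVQstoreconst"
	case useSSE && s > 16 && s%16 != 0:
		// Zero the ragged s%16 head, then recurse on Zero [s - s%16].
		return "peel s%16, then Zero again"
	case useSSE && (s == 16 || s == 32 || s == 48 || s == 64):
		// One 16-byte MOVOstore of MOVOconst [0] per 16 bytes.
		return "MOVOstore chain"
	case s > 64 && s <= 1024 && s%16 == 0 && !noDuffDevice:
		// Jump into the Duff's-device zeroing routine.
		return "DUFFZERO"
	case s%8 == 0:
		// Very large, or Duff/SSE unavailable for this size: REP STOSQ.
		return "REPSTOSQ"
	}
	return "handled by other rules"
}

// rewriteFuncSketch shows how matchers like rewriteValueAMD64 and
// rewriteBlockAMD64 are driven: rewrite every value and every block,
// and repeat until a full pass changes nothing. The real driver is
// applyRewrite in rewrite.go; this loop is a minimal stand-in.
func rewriteFuncSketch(f *Func) {
	for {
		change := false
		for _, b := range f.Blocks {
			for _, v := range b.Values {
				if rewriteValueAMD64(v) {
					change = true
				}
			}
			if rewriteBlockAMD64(b) {
				change = true
			}
		}
		if !change {
			return
		}
	}
}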