github.com/cloudwego/frugal@v0.1.15/internal/atm/ssa/pass_reorder.go

/*
 * Copyright 2022 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ssa

import (
    `fmt`
    `sort`

    `github.com/cloudwego/frugal/internal/rt`
)

type _ValueId struct {
    i int
    v IrNode
    r bool
}

func mkvid(i int, v IrNode) *_ValueId {
    return &_ValueId {
        i: i,
        v: v,
        r: true,
    }
}

type _BlockRef struct {
    bb *BasicBlock
}

func (self *_BlockRef) update(cfg *CFG, bb *BasicBlock) {
    u := bb
    v := self.bb

    /* move them to the same depth */
    for cfg.Depth[u.Id] != cfg.Depth[v.Id] {
        if cfg.Depth[u.Id] > cfg.Depth[v.Id] {
            u = cfg.DominatedBy[u.Id]
        } else {
            v = cfg.DominatedBy[v.Id]
        }
    }

    /* move both nodes until they meet */
    for u != v {
        u = cfg.DominatedBy[u.Id]
        v = cfg.DominatedBy[v.Id]
    }

    /* sanity check */
    if u != nil {
        self.bb = u
    } else {
        panic("reorder: invalid CFG dominator tree")
    }
}

// Reorder moves values closer to their usages, which reduces register pressure.
type Reorder struct{}

func (Reorder) isMovable(v IrNode) bool {
    var f bool
    var u IrUsages
    var d IrDefinitions

    /* marked as immovable */
    if _, f = v.(IrImmovable); f {
        return false
    }

    /* blacklist all instructions that use physical registers */
    if u, f = v.(IrUsages); f {
        for _, r := range u.Usages() {
            if r.Kind() == K_arch {
                return false
            }
        }
    }

    /* blacklist all instructions that alter physical registers */
    if d, f = v.(IrDefinitions); f {
        for _, r := range d.Definitions() {
            if r.Kind() == K_arch {
                return false
            }
        }
    }

    /* no such registers, all checked ok */
    return true
}

func (self Reorder) moveInterblock(cfg *CFG) {
    defs := make(map[Reg]*_BlockRef)
    uses := make(map[Pos]*_BlockRef)
    move := make(map[*BasicBlock]int)

    /* usage update routine */
    updateUsage := func(r Reg, bb *BasicBlock) {
        if m, ok := defs[r]; ok {
            if m.bb == nil {
                m.bb = bb
            } else {
                m.update(cfg, bb)
            }
        }
    }

    /* retry until no more modifications (the nil key just seeds the first iteration) */
    for move[nil] = 0; len(move) != 0; {
        rt.MapClear(defs)
        rt.MapClear(move)
        rt.MapClear(uses)

        /* Phase 1: Find all movable value definitions */
        cfg.PostOrder().ForEach(func(bb *BasicBlock) {
            for i, v := range bb.Ins {
                var f bool
                var p *_BlockRef
                var d IrDefinitions

                /* the value must be movable and have definitions */
                if d, f = v.(IrDefinitions); !f || !self.isMovable(v) {
                    continue
                }

                /* create a new value movement if needed */
                if p, f = uses[pos(bb, i)]; !f {
                    p = new(_BlockRef)
                    uses[pos(bb, i)] = p
                }

                /* mark all the non-zero definition sites */
                for _, r := range d.Definitions() {
                    if r.Kind() != K_zero {
                        defs[*r] = p
                    }
                }
            }
        })
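
        /*
         * Note: when a value is used from more than one block, Phase 2 below
         * folds every usage site into a single hoisting target through
         * _BlockRef.update(), which is a lowest-common-ancestor walk on the
         * dominator tree: both nodes are first lifted to the same depth, then
         * lifted in lock-step until they meet. The meeting point is the
         * deepest block that dominates every recorded usage, so a definition
         * hoisted there is guaranteed to execute before all of its uses.
         * Rough shape of the idea (hypothetical block numbers):
         *
         *          bb_1          <- meeting point, the value moves here
         *         /    \
         *       bb_2   bb_3      <- two usage sites of the same value
         */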

        /* Phase 2: Identify the earliest usage locations */
        for _, bb := range cfg.PostOrder().Reversed() {
            var ok bool
            var use IrUsages

            /* search in Phi nodes */
            for _, v := range bb.Phi {
                for b, r := range v.V {
                    updateUsage(*r, b)
                }
            }

            /* search in instructions */
            for _, v := range bb.Ins {
                if use, ok = v.(IrUsages); ok {
                    for _, r := range use.Usages() {
                        updateUsage(*r, bb)
                    }
                }
            }

            /* search the terminator */
            if use, ok = bb.Term.(IrUsages); ok {
                for _, r := range use.Usages() {
                    updateUsage(*r, bb)
                }
            }
        }

        /* Phase 3: Move value definitions into their usage blocks */
        for p, m := range uses {
            if m.bb != nil && m.bb != p.B {
                m.bb.Ins = append(m.bb.Ins, p.B.Ins[p.I])
                move[m.bb] = move[m.bb] + 1
                p.B.Ins[p.I] = new(IrNop)
            }
        }

        /* Phase 4: Rotate the moved values to the front of their new blocks */
        for bb, i := range move {
            v := bb.Ins
            n := len(bb.Ins)
            bb.Ins = make([]IrNode, n)
            copy(bb.Ins[i:], v[:n - i])
            copy(bb.Ins[:i], v[n - i:])
        }
    }

    /* Phase 5: Remove all the placeholder NOP instructions */
    cfg.PostOrder().ForEach(func(bb *BasicBlock) {
        ins := bb.Ins
        bb.Ins = bb.Ins[:0]

        /* filter out the NOP instructions */
        for _, v := range ins {
            if _, ok := v.(*IrNop); !ok {
                bb.Ins = append(bb.Ins, v)
            }
        }
    })
}

func (self Reorder) moveIntrablock(cfg *CFG) {
    var rbuf []IrNode
    var mbuf []*_ValueId
    var vbuf []*_ValueId
    var addval func(*_ValueId, bool)

    /* reusable states */
    adds := make(map[int]struct{})
    defs := make(map[Reg]*_ValueId)

    /* topological sorter */
    addval = func(v *_ValueId, depsOnly bool) {
        var ok bool
        var use IrUsages
        var val *_ValueId

        /* check if it's been added */
        if _, ok = adds[v.i]; ok {
            return
        }

        /* add all the dependencies recursively */
        if use, ok = v.v.(IrUsages); ok {
            for _, r := range use.Usages() {
                if val, ok = defs[*r]; ok {
                    addval(val, false)
                }
            }
        }

        /* add the instruction itself if needed */
        if !depsOnly {
            rbuf = append(rbuf, v.v)
            adds[v.i] = struct{}{}
        }
    }
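
    /*
     * The closure above is a depth-first topological sort: before an
     * instruction is appended to rbuf, the in-block definition of every
     * register it uses is appended first, and the adds set keeps each
     * instruction from being emitted twice. Within a single basic block,
     * SSA form rules out dependency cycles (every register has exactly one
     * definition, and no instruction consumes its own result), so the
     * recursion terminates. The depsOnly flag exists for the terminator:
     * its dependencies must be scheduled, but the terminator itself is not
     * part of bb.Ins.
     *
     * For example (hypothetical instructions), given a block containing
     *
     *      %2 = add %0, %1
     *      %3 = mul %2, %0
     *
     * calling addval on the mul first still emits the add before it, because
     * %2 resolves through defs and is added recursively.
     */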

    /* process every block */
    cfg.PostOrder().ForEach(func(bb *BasicBlock) {
        rbuf = rbuf[:0]
        mbuf = mbuf[:0]
        vbuf = vbuf[:0]
        rt.MapClear(adds)
        rt.MapClear(defs)

        /* number all instructions */
        for i, v := range bb.Ins {
            id := mkvid(i, v)
            vbuf = append(vbuf, id)

            /* preserve the order of immovable instructions */
            if !self.isMovable(v) {
                mbuf = append(mbuf, id)
            }
        }

        /* mark all non-Phi definitions in this block */
        for _, v := range vbuf {
            if def, ok := v.v.(IrDefinitions); ok {
                for _, r := range def.Definitions() {
                    if _, ok = defs[*r]; ok {
                        panic(fmt.Sprintf("reorder: multiple definitions for %s in bb_%d", r, bb.Id))
                    } else {
                        defs[*r] = v
                    }
                }
            }
        }

        /* find all the root nodes */
        for _, v := range vbuf {
            if use, ok := v.v.(IrUsages); !ok {
                v.r = false
            } else {
                for _, r := range use.Usages() {
                    if v, ok = defs[*r]; ok {
                        v.r = false
                    }
                }
            }
        }

        /* all the immovable instructions need to preserve their order */
        for _, v := range mbuf {
            addval(v, false)
        }

        /* add all the root instructions */
        for _, v := range vbuf {
            if v.r {
                addval(v, false)
            }
        }

        /* add the remaining instructions */
        for _, v := range vbuf {
            if _, ok := adds[v.i]; !ok {
                addval(v, false)
            }
        }

        /* schedule the dependencies of the terminator (the terminator itself is not in bb.Ins) */
        addval(mkvid(-1, bb.Term), true)
        bb.Ins = append(bb.Ins[:0], rbuf...)
    })
}

func (Reorder) moveArgumentLoad(cfg *CFG) {
    var ok bool
    var ir []IrNode
    var vv *IrLoadArg

    /* extract all the argument loads */
    cfg.PostOrder().ForEach(func(bb *BasicBlock) {
        ins := bb.Ins
        bb.Ins = bb.Ins[:0]

        /* scan the instructions */
        for _, v := range ins {
            if vv, ok = v.(*IrLoadArg); ok {
                ir = append(ir, vv)
            } else {
                bb.Ins = append(bb.Ins, v)
            }
        }
    })

    /* sort by argument ID */
    sort.Slice(ir, func(i int, j int) bool {
        return ir[i].(*IrLoadArg).I < ir[j].(*IrLoadArg).I
    })

    /* prepend to the root node */
    ins := cfg.Root.Ins
    cfg.Root.Ins = append(ir, ins...)
}

func (self Reorder) Apply(cfg *CFG) {
    self.moveInterblock(cfg)
    self.moveIntrablock(cfg)
    self.moveArgumentLoad(cfg)
}
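
/*
 * Usage sketch (illustrative; the surrounding pass pipeline lives elsewhere
 * in this package):
 *
 *      var opt Reorder
 *      opt.Apply(cfg)
 *
 * Apply() runs the sub-passes in a fixed order: moveInterblock() first
 * decides which basic block each movable value lives in, moveIntrablock()
 * then fixes the instruction order inside every block, and
 * moveArgumentLoad() finally pins all IrLoadArg instructions, sorted by
 * argument index, at the top of the root block.
 */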