github.com/bir3/gocompiler@v0.3.205/src/cmd/compile/internal/ssa/deadcode.go (about) 1 // Copyright 2015 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package ssa 6 7 import ( 8 "github.com/bir3/gocompiler/src/cmd/internal/src" 9 ) 10 11 // findlive returns the reachable blocks and live values in f. 12 // The caller should call f.Cache.freeBoolSlice(live) when it is done with it. 13 func findlive(f *Func) (reachable []bool, live []bool) { 14 reachable = ReachableBlocks(f) 15 var order []*Value 16 live, order = liveValues(f, reachable) 17 f.Cache.freeValueSlice(order) 18 return 19 } 20 21 // ReachableBlocks returns the reachable blocks in f. 22 func ReachableBlocks(f *Func) []bool { 23 reachable := make([]bool, f.NumBlocks()) 24 reachable[f.Entry.ID] = true 25 p := make([]*Block, 0, 64) // stack-like worklist 26 p = append(p, f.Entry) 27 for len(p) > 0 { 28 // Pop a reachable block 29 b := p[len(p)-1] 30 p = p[:len(p)-1] 31 // Mark successors as reachable 32 s := b.Succs 33 if b.Kind == BlockFirst { 34 s = s[:1] 35 } 36 for _, e := range s { 37 c := e.b 38 if int(c.ID) >= len(reachable) { 39 f.Fatalf("block %s >= f.NumBlocks()=%d?", c, len(reachable)) 40 } 41 if !reachable[c.ID] { 42 reachable[c.ID] = true 43 p = append(p, c) // push 44 } 45 } 46 } 47 return reachable 48 } 49 50 // liveValues returns the live values in f and a list of values that are eligible 51 // to be statements in reversed data flow order. 52 // The second result is used to help conserve statement boundaries for debugging. 53 // reachable is a map from block ID to whether the block is reachable. 54 // The caller should call f.Cache.freeBoolSlice(live) and f.Cache.freeValueSlice(liveOrderStmts). 55 // when they are done with the return values. 
func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value) {
	live = f.Cache.allocBoolSlice(f.NumValues())
	liveOrderStmts = f.Cache.allocValueSlice(f.NumValues())[:0]

	// After regalloc, consider all values to be live.
	// See the comment at the top of regalloc.go and in deadcode for details.
	if f.RegAlloc != nil {
		for i := range live {
			live[i] = true
		}
		return
	}

	// Record all the inline indexes we need
	// (i.e. those that some value or block position actually refers to),
	// so that OpInlMark values for fully-optimized-away bodies can be dropped below.
	var liveInlIdx map[int]bool
	pt := f.Config.ctxt.PosTable
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			i := pt.Pos(v.Pos).Base().InliningIndex()
			if i < 0 {
				continue
			}
			if liveInlIdx == nil {
				// Allocate lazily; many functions have no inlined bodies.
				liveInlIdx = map[int]bool{}
			}
			liveInlIdx[i] = true
		}
		i := pt.Pos(b.Pos).Base().InliningIndex()
		if i < 0 {
			continue
		}
		if liveInlIdx == nil {
			liveInlIdx = map[int]bool{}
		}
		liveInlIdx[i] = true
	}

	// Find all live values
	q := f.Cache.allocValueSlice(f.NumValues())[:0] // worklist for the transitive closure below
	defer f.Cache.freeValueSlice(q)

	// Starting set: all control values of reachable blocks are live.
	// Calls are live (because callee can observe the memory state).
	for _, b := range f.Blocks {
		if !reachable[b.ID] {
			continue
		}
		for _, v := range b.ControlValues() {
			if !live[v.ID] {
				live[v.ID] = true
				q = append(q, v)
				if v.Pos.IsStmt() != src.PosNotStmt {
					liveOrderStmts = append(liveOrderStmts, v)
				}
			}
		}
		for _, v := range b.Values {
			if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] {
				live[v.ID] = true
				q = append(q, v)
				if v.Pos.IsStmt() != src.PosNotStmt {
					liveOrderStmts = append(liveOrderStmts, v)
				}
			}
			if v.Type.IsVoid() && !live[v.ID] {
				// The only Void ops are nil checks and inline marks. We must keep these.
				if v.Op == OpInlMark && !liveInlIdx[int(v.AuxInt)] {
					// We don't need marks for bodies that
					// have been completely optimized away.
					// TODO: save marks only for bodies which
					// have a faulting instruction or a call?
					continue
				}
				live[v.ID] = true
				q = append(q, v)
				if v.Pos.IsStmt() != src.PosNotStmt {
					liveOrderStmts = append(liveOrderStmts, v)
				}
			}
		}
	}

	// Compute transitive closure of live values.
	for len(q) > 0 {
		// pop a reachable value
		v := q[len(q)-1]
		q[len(q)-1] = nil // clear the slot so the cached slice doesn't retain the value
		q = q[:len(q)-1]
		for i, x := range v.Args {
			// A phi argument coming in along a dead edge does not keep
			// that argument alive.
			if v.Op == OpPhi && !reachable[v.Block.Preds[i].b.ID] {
				continue
			}
			if !live[x.ID] {
				live[x.ID] = true
				q = append(q, x) // push
				if x.Pos.IsStmt() != src.PosNotStmt {
					liveOrderStmts = append(liveOrderStmts, x)
				}
			}
		}
	}

	return
}

// deadcode removes dead code from f.
func deadcode(f *Func) {
	// deadcode after regalloc is forbidden for now. Regalloc
	// doesn't quite generate legal SSA which will lead to some
	// required moves being eliminated. See the comment at the
	// top of regalloc.go for details.
	if f.RegAlloc != nil {
		f.Fatalf("deadcode after regalloc")
	}

	// Find reachable blocks.
	reachable := ReachableBlocks(f)

	// Get rid of edges from dead to live code.
	for _, b := range f.Blocks {
		if reachable[b.ID] {
			continue
		}
		// Note: no i++ in the removeEdge case — removeEdge shifts the
		// successor list, so index i now holds the next edge to examine.
		for i := 0; i < len(b.Succs); {
			e := b.Succs[i]
			if reachable[e.b.ID] {
				b.removeEdge(i)
			} else {
				i++
			}
		}
	}

	// Get rid of dead edges from live code.
	// A reachable BlockFirst never takes its second edge, so drop it and
	// downgrade the block to a plain unconditional branch.
	for _, b := range f.Blocks {
		if !reachable[b.ID] {
			continue
		}
		if b.Kind != BlockFirst {
			continue
		}
		b.removeEdge(1)
		b.Kind = BlockPlain
		b.Likely = BranchUnknown
	}

	// Splice out any copies introduced during dead block removal.
	copyelim(f)

	// Find live values.
	live, order := liveValues(f, reachable)
	defer func() { f.Cache.freeBoolSlice(live) }()
	defer func() { f.Cache.freeValueSlice(order) }()

	// Remove dead & duplicate entries from namedValues map.
	s := f.newSparseSet(f.NumValues()) // tracks value IDs already kept for the current name
	defer f.retSparseSet(s)
	i := 0
	for _, name := range f.Names {
		j := 0
		s.clear()
		values := f.NamedValues[*name]
		// Compact the live, first-occurrence values to the front of the slice.
		for _, v := range values {
			if live[v.ID] && !s.contains(v.ID) {
				values[j] = v
				j++
				s.add(v.ID)
			}
		}
		if j == 0 {
			delete(f.NamedValues, *name)
		} else {
			f.Names[i] = name
			i++
			// Zero the dropped tail so the backing array doesn't retain dead values.
			for k := len(values) - 1; k >= j; k-- {
				values[k] = nil
			}
			f.NamedValues[*name] = values[:j]
		}
	}
	clearNames := f.Names[i:]
	for j := range clearNames {
		clearNames[j] = nil
	}
	f.Names = f.Names[:i]

	pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
	pendingLines.clear()

	// Unlink values and conserve statement boundaries
	for i, b := range f.Blocks {
		if !reachable[b.ID] {
			// TODO what if control is statement boundary? Too late here.
			b.ResetControls()
		}
		for _, v := range b.Values {
			if !live[v.ID] {
				v.resetArgs()
				// Remember the position (and block index) of a dying statement
				// boundary so it can be reattached to a surviving value below.
				if v.Pos.IsStmt() == src.PosIsStmt && reachable[b.ID] {
					pendingLines.set(v.Pos, int32(i)) // TODO could be more than one pos for a line
				}
			}
		}
	}

	// Find new homes for lost lines -- require earliest in data flow with same line that is also in same block
	// (order is in reversed data-flow order, hence the backwards iteration).
	for i := len(order) - 1; i >= 0; i-- {
		w := order[i]
		if j := pendingLines.get(w.Pos); j > -1 && f.Blocks[j] == w.Block {
			w.Pos = w.Pos.WithIsStmt()
			pendingLines.remove(w.Pos)
		}
	}

	// Any boundary that failed to match a live value can move to a block end
	pendingLines.foreachEntry(func(j int32, l uint, bi int32) {
		b := f.Blocks[bi]
		if b.Pos.Line() == l && b.Pos.FileIndex() == j {
			b.Pos = b.Pos.WithIsStmt()
		}
	})

	// Remove dead values from blocks' value list. Return dead
	// values to the allocator.
	for _, b := range f.Blocks {
		i := 0
		for _, v := range b.Values {
			if live[v.ID] {
				b.Values[i] = v
				i++
			} else {
				f.freeValue(v)
			}
		}
		b.truncateValues(i)
	}

	// Remove dead blocks from WBLoads list.
	i = 0
	for _, b := range f.WBLoads {
		if reachable[b.ID] {
			f.WBLoads[i] = b
			i++
		}
	}
	clearWBLoads := f.WBLoads[i:]
	for j := range clearWBLoads {
		clearWBLoads[j] = nil
	}
	f.WBLoads = f.WBLoads[:i]

	// Remove unreachable blocks. Return dead blocks to allocator.
	i = 0
	for _, b := range f.Blocks {
		if reachable[b.ID] {
			f.Blocks[i] = b
			i++
		} else {
			// Dead blocks must already be empty: their values were freed above.
			if len(b.Values) > 0 {
				b.Fatalf("live values in unreachable block %v: %v", b, b.Values)
			}
			f.freeBlock(b)
		}
	}
	// zero remainder to help GC
	tail := f.Blocks[i:]
	for j := range tail {
		tail[j] = nil
	}
	f.Blocks = f.Blocks[:i]
}

// removeEdge removes the i'th outgoing edge from b (and
// the corresponding incoming edge from b.Succs[i].b).
func (b *Block) removeEdge(i int) {
	e := b.Succs[i]
	c := e.b
	j := e.i // index of the reciprocal edge in c.Preds

	// Adjust b.Succs
	b.removeSucc(i)

	// Adjust c.Preds
	c.removePred(j)

	// Remove phi args from c's phis.
	for _, v := range c.Values {
		if v.Op != OpPhi {
			continue
		}
		c.removePhiArg(v, j)
		phielimValue(v)
		// Note: this is trickier than it looks. Replacing
		// a Phi with a Copy can in general cause problems because
		// Phi and Copy don't have exactly the same semantics.
		// Phi arguments always come from a predecessor block,
		// whereas copies don't. This matters in loops like:
		// 1: x = (Phi y)
		//    y = (Add x 1)
		//    goto 1
		// If we replace Phi->Copy, we get
		// 1: x = (Copy y)
		//    y = (Add x 1)
		//    goto 1
		// (Phi y) refers to the *previous* value of y, whereas
		// (Copy y) refers to the *current* value of y.
		// The modified code has a cycle and the scheduler
		// will barf on it.
		//
		// Fortunately, this situation can only happen for dead
		// code loops. We know the code we're working with is
		// not dead, so we're ok.
		// Proof: If we have a potential bad cycle, we have a
		// situation like this:
		//   x = (Phi z)
		//   y = (op1 x ...)
		//   z = (op2 y ...)
		// Where opX are not Phi ops. But such a situation
		// implies a cycle in the dominator graph. In the
		// example, x.Block dominates y.Block, y.Block dominates
		// z.Block, and z.Block dominates x.Block (treating
		// "dominates" as reflexive). Cycles in the dominator
		// graph can only happen in an unreachable cycle.
	}
}