github.com/gagliardetto/golang-go@v0.0.0-20201020153340-53909ea70814/cmd/compile/internal/ssa/deadcode.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"github.com/gagliardetto/golang-go/cmd/internal/src"
)

// findlive returns the reachable blocks and live values in f.
// The caller should call f.retDeadcodeLive(live) when it is done with it.
func findlive(f *Func) (reachable []bool, live []bool) {
	reachable = ReachableBlocks(f)
	var order []*Value
	live, order = liveValues(f, reachable)
	f.retDeadcodeLiveOrderStmts(order)
	return
}

// ReachableBlocks returns the reachable blocks in f.
func ReachableBlocks(f *Func) []bool {
	reachable := make([]bool, f.NumBlocks())
	reachable[f.Entry.ID] = true
	p := make([]*Block, 0, 64) // stack-like worklist
	p = append(p, f.Entry)
	for len(p) > 0 {
		// Pop a reachable block
		b := p[len(p)-1]
		p = p[:len(p)-1]
		// Mark successors as reachable
		s := b.Succs
		if b.Kind == BlockFirst {
			s = s[:1]
		}
		for _, e := range s {
			c := e.b
			if int(c.ID) >= len(reachable) {
				f.Fatalf("block %s >= f.NumBlocks()=%d?", c, len(reachable))
			}
			if !reachable[c.ID] {
				reachable[c.ID] = true
				p = append(p, c) // push
			}
		}
	}
	return reachable
}

// liveValues returns the live values in f and a list of values that are eligible
// to be statements in reversed data flow order.
// The second result is used to help conserve statement boundaries for debugging.
// reachable is a slice indexed by block ID, reporting whether each block is reachable.
// The caller should call f.retDeadcodeLive(live) and f.retDeadcodeLiveOrderStmts(liveOrderStmts)
// when they are done with the return values.
func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value) {
	live = f.newDeadcodeLive()
	if cap(live) < f.NumValues() {
		live = make([]bool, f.NumValues())
	} else {
		live = live[:f.NumValues()]
		for i := range live {
			live[i] = false
		}
	}

	liveOrderStmts = f.newDeadcodeLiveOrderStmts()
	liveOrderStmts = liveOrderStmts[:0]

	// After regalloc, consider all values to be live.
	// See the comment at the top of regalloc.go and in deadcode for details.
	if f.RegAlloc != nil {
		for i := range live {
			live[i] = true
		}
		return
	}

	// Record all the inline indexes we need
	var liveInlIdx map[int]bool
	pt := f.Config.ctxt.PosTable
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			i := pt.Pos(v.Pos).Base().InliningIndex()
			if i < 0 {
				continue
			}
			if liveInlIdx == nil {
				liveInlIdx = map[int]bool{}
			}
			liveInlIdx[i] = true
		}
		i := pt.Pos(b.Pos).Base().InliningIndex()
		if i < 0 {
			continue
		}
		if liveInlIdx == nil {
			liveInlIdx = map[int]bool{}
		}
		liveInlIdx[i] = true
	}

	// Find all live values
	q := f.Cache.deadcode.q[:0]
	defer func() { f.Cache.deadcode.q = q }()

	// Starting set: all control values of reachable blocks are live.
	// Calls are live (because callee can observe the memory state).
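	// Values with observable side effects and Void ops (nil checks and
	// inline marks) are seeded here as well: they must be kept even when
	// no other live value uses their results, except for inline marks
	// whose inlined body has been entirely optimized away.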
	for _, b := range f.Blocks {
		if !reachable[b.ID] {
			continue
		}
		for _, v := range b.ControlValues() {
			if !live[v.ID] {
				live[v.ID] = true
				q = append(q, v)
				if v.Pos.IsStmt() != src.PosNotStmt {
					liveOrderStmts = append(liveOrderStmts, v)
				}
			}
		}
		for _, v := range b.Values {
			if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] {
				live[v.ID] = true
				q = append(q, v)
				if v.Pos.IsStmt() != src.PosNotStmt {
					liveOrderStmts = append(liveOrderStmts, v)
				}
			}
			if v.Type.IsVoid() && !live[v.ID] {
				// The only Void ops are nil checks and inline marks. We must keep these.
				if v.Op == OpInlMark && !liveInlIdx[int(v.AuxInt)] {
					// We don't need marks for bodies that
					// have been completely optimized away.
					// TODO: save marks only for bodies which
					// have a faulting instruction or a call?
					continue
				}
				live[v.ID] = true
				q = append(q, v)
				if v.Pos.IsStmt() != src.PosNotStmt {
					liveOrderStmts = append(liveOrderStmts, v)
				}
			}
		}
	}

	// Compute transitive closure of live values.
	for len(q) > 0 {
		// pop a reachable value
		v := q[len(q)-1]
		q = q[:len(q)-1]
		for i, x := range v.Args {
			if v.Op == OpPhi && !reachable[v.Block.Preds[i].b.ID] {
				continue
			}
			if !live[x.ID] {
				live[x.ID] = true
				q = append(q, x) // push
				if x.Pos.IsStmt() != src.PosNotStmt {
					liveOrderStmts = append(liveOrderStmts, x)
				}
			}
		}
	}

	return
}

// deadcode removes dead code from f.
func deadcode(f *Func) {
	// deadcode after regalloc is forbidden for now. Regalloc
	// doesn't quite generate legal SSA which will lead to some
	// required moves being eliminated. See the comment at the
	// top of regalloc.go for details.
	if f.RegAlloc != nil {
		f.Fatalf("deadcode after regalloc")
	}

	// Find reachable blocks.
	reachable := ReachableBlocks(f)

	// Get rid of edges from dead to live code.
	for _, b := range f.Blocks {
		if reachable[b.ID] {
			continue
		}
		for i := 0; i < len(b.Succs); {
			e := b.Succs[i]
			if reachable[e.b.ID] {
				b.removeEdge(i)
			} else {
				i++
			}
		}
	}

	// Get rid of dead edges from live code.
	for _, b := range f.Blocks {
		if !reachable[b.ID] {
			continue
		}
		if b.Kind != BlockFirst {
			continue
		}
		b.removeEdge(1)
		b.Kind = BlockPlain
		b.Likely = BranchUnknown
	}

	// Splice out any copies introduced during dead block removal.
	copyelim(f)

	// Find live values.
	live, order := liveValues(f, reachable)
	defer f.retDeadcodeLive(live)
	defer f.retDeadcodeLiveOrderStmts(order)

	// Remove dead & duplicate entries from namedValues map.
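	// Each name's value slice is compacted in place: only live values that
	// have not been seen before (tracked by the sparse set s) are kept, and
	// names left with no surviving values are deleted from f.NamedValues
	// and dropped from f.Names.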
	s := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(s)
	i := 0
	for _, name := range f.Names {
		j := 0
		s.clear()
		values := f.NamedValues[name]
		for _, v := range values {
			if live[v.ID] && !s.contains(v.ID) {
				values[j] = v
				j++
				s.add(v.ID)
			}
		}
		if j == 0 {
			delete(f.NamedValues, name)
		} else {
			f.Names[i] = name
			i++
			for k := len(values) - 1; k >= j; k-- {
				values[k] = nil
			}
			f.NamedValues[name] = values[:j]
		}
	}
	for k := len(f.Names) - 1; k >= i; k-- {
		f.Names[k] = LocalSlot{}
	}
	f.Names = f.Names[:i]

	pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
	pendingLines.clear()

	// Unlink values and conserve statement boundaries
	for i, b := range f.Blocks {
		if !reachable[b.ID] {
			// TODO what if control is statement boundary? Too late here.
			b.ResetControls()
		}
		for _, v := range b.Values {
			if !live[v.ID] {
				v.resetArgs()
				if v.Pos.IsStmt() == src.PosIsStmt && reachable[b.ID] {
					pendingLines.set(v.Pos, int32(i)) // TODO could be more than one pos for a line
				}
			}
		}
	}

	// Find new homes for lost lines -- require earliest in data flow with same line that is also in same block
	for i := len(order) - 1; i >= 0; i-- {
		w := order[i]
		if j := pendingLines.get(w.Pos); j > -1 && f.Blocks[j] == w.Block {
			w.Pos = w.Pos.WithIsStmt()
			pendingLines.remove(w.Pos)
		}
	}

	// Any boundary that failed to match a live value can move to a block end
	pendingLines.foreachEntry(func(j int32, l uint, bi int32) {
		b := f.Blocks[bi]
		if b.Pos.Line() == l && b.Pos.FileIndex() == j {
			b.Pos = b.Pos.WithIsStmt()
		}
	})

	// Remove dead values from blocks' value list. Return dead
	// values to the allocator.
	for _, b := range f.Blocks {
		i := 0
		for _, v := range b.Values {
			if live[v.ID] {
				b.Values[i] = v
				i++
			} else {
				f.freeValue(v)
			}
		}
		// aid GC
		tail := b.Values[i:]
		for j := range tail {
			tail[j] = nil
		}
		b.Values = b.Values[:i]
	}

	// Remove dead blocks from WBLoads list.
	i = 0
	for _, b := range f.WBLoads {
		if reachable[b.ID] {
			f.WBLoads[i] = b
			i++
		}
	}
	for j := i; j < len(f.WBLoads); j++ {
		f.WBLoads[j] = nil
	}
	f.WBLoads = f.WBLoads[:i]

	// Remove unreachable blocks. Return dead blocks to allocator.
	i = 0
	for _, b := range f.Blocks {
		if reachable[b.ID] {
			f.Blocks[i] = b
			i++
		} else {
			if len(b.Values) > 0 {
				b.Fatalf("live values in unreachable block %v: %v", b, b.Values)
			}
			f.freeBlock(b)
		}
	}
	// zero remainder to help GC
	tail := f.Blocks[i:]
	for j := range tail {
		tail[j] = nil
	}
	f.Blocks = f.Blocks[:i]
}

// removeEdge removes the i'th outgoing edge from b (and
// the corresponding incoming edge from b.Succs[i].b).
func (b *Block) removeEdge(i int) {
	e := b.Succs[i]
	c := e.b
	j := e.i

	// Adjust b.Succs
	b.removeSucc(i)

	// Adjust c.Preds
	c.removePred(j)

	// Remove phi args from c's phis.
	n := len(c.Preds)
	for _, v := range c.Values {
		if v.Op != OpPhi {
			continue
		}
		v.Args[j].Uses--
		v.Args[j] = v.Args[n]
		v.Args[n] = nil
		v.Args = v.Args[:n]
		phielimValue(v)
		// Note: this is trickier than it looks. Replacing
		// a Phi with a Copy can in general cause problems because
		// Phi and Copy don't have exactly the same semantics.
		// Phi arguments always come from a predecessor block,
		// whereas copies don't. This matters in loops like:
		// 1: x = (Phi y)
		//    y = (Add x 1)
		//    goto 1
		// If we replace Phi->Copy, we get
		// 1: x = (Copy y)
		//    y = (Add x 1)
		//    goto 1
		// (Phi y) refers to the *previous* value of y, whereas
		// (Copy y) refers to the *current* value of y.
		// The modified code has a cycle and the scheduler
		// will barf on it.
		//
		// Fortunately, this situation can only happen for dead
		// code loops. We know the code we're working with is
		// not dead, so we're ok.
		// Proof: If we have a potential bad cycle, we have a
		// situation like this:
		//   x = (Phi z)
		//   y = (op1 x ...)
		//   z = (op2 y ...)
		// Where opX are not Phi ops. But such a situation
		// implies a cycle in the dominator graph. In the
		// example, x.Block dominates y.Block, y.Block dominates
		// z.Block, and z.Block dominates x.Block (treating
		// "dominates" as reflexive). Cycles in the dominator
		// graph can only happen in an unreachable cycle.
	}
}