// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

// dse does dead-store elimination on the Function.
// Dead stores are those which are unconditionally followed by
// another store to the same location, with no intervening load.
// This implementation only works within a basic block. TODO: use something more global.
func dse(f *Func) {
	var stores []*Value
	// loadUse / storeUse are recycled scratch sets keyed by value ID.
	loadUse := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(loadUse)
	storeUse := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(storeUse)
	// shadowed maps an address value's ID to the size of a store to that
	// address seen later in the block (see the backward walk below).
	shadowed := newSparseMap(f.NumValues()) // TODO: cache
	for _, b := range f.Blocks {
		// Find all the stores in this block. Categorize their uses:
		//  loadUse contains stores which are used by a subsequent load.
		//  storeUse contains stores which are used by a subsequent store.
		loadUse.clear()
		storeUse.clear()
		stores = stores[:0]
		for _, v := range b.Values {
			if v.Op == OpPhi {
				// Ignore phis - they will always be first and can't be eliminated
				continue
			}
			if v.Type.IsMemory() {
				// v produces a memory state: it is a store-like op.
				stores = append(stores, v)
				if v.Op == OpSelect1 {
					// Use the args of the tuple-generating op.
					v = v.Args[0]
				}
				for _, a := range v.Args {
					if a.Block == b && a.Type.IsMemory() {
						storeUse.add(a.ID)
						if v.Op != OpStore && v.Op != OpZero && v.Op != OpVarDef && v.Op != OpVarKill {
							// CALL, DUFFCOPY, etc. are both
							// reads and writes.
							loadUse.add(a.ID)
						}
					}
				}
			} else {
				// Non-memory value: any memory arg it takes is a read
				// of that memory state.
				for _, a := range v.Args {
					if a.Block == b && a.Type.IsMemory() {
						loadUse.add(a.ID)
					}
				}
			}
		}
		if len(stores) == 0 {
			continue
		}

		// find last store in the block
		// (the unique store whose memory result is not consumed by another
		// store in this block).
		var last *Value
		for _, v := range stores {
			if storeUse.contains(v.ID) {
				continue
			}
			if last != nil {
				b.Fatalf("two final stores - simultaneous live stores %s %s", last, v)
			}
			last = v
		}
		if last == nil {
			b.Fatalf("no last store found - cycle?")
		}

		// Walk backwards looking for dead stores. Keep track of shadowed addresses.
		// An "address" is an SSA Value which encodes both the address and size of
		// the write. This code will not remove dead stores to the same address
		// of different types.
		shadowed.clear()
		v := last

	walkloop:
		if loadUse.contains(v.ID) {
			// Someone might be reading this memory state.
			// Clear all shadowed addresses.
			shadowed.clear()
		}
		if v.Op == OpStore || v.Op == OpZero {
			var sz int64
			if v.Op == OpStore {
				sz = v.AuxInt
			} else { // OpZero
				sz = SizeAndAlign(v.AuxInt).Size()
			}
			// If a later store covers at least as many bytes at the same
			// address value, this store is dead. (shadowed.get returns -1
			// for a missing entry — NOTE(review): assumed from the -1
			// sentinel check here; confirm against sparseMap.)
			if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
				// Modify store into a copy
				// (of its incoming memory state, making it a no-op).
				if v.Op == OpStore {
					// store addr value mem
					v.SetArgs1(v.Args[2])
				} else {
					// zero addr mem
					typesz := v.Args[0].Type.ElemType().Size()
					if sz != typesz {
						f.Fatalf("mismatched zero/store sizes: %d and %d [%s]",
							sz, typesz, v.LongString())
					}
					v.SetArgs1(v.Args[1])
				}
				v.Aux = nil
				v.AuxInt = 0
				v.Op = OpCopy
			} else {
				// Not dead: record this store so earlier, smaller stores
				// to the same address can be eliminated.
				if sz > 0x7fffffff { // work around sparseMap's int32 value type
					sz = 0x7fffffff
				}
				shadowed.set(v.Args[0].ID, int32(sz), 0)
			}
		}
		// walk to previous store
		if v.Op == OpPhi {
			continue // At start of block. Move on to next block.
		}
		for _, a := range v.Args {
			if a.Block == b && a.Type.IsMemory() {
				v = a
				goto walkloop
			}
		}
	}
}