// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

// nilcheckelim eliminates unnecessary nil checks.
// runs on machine-independent code.
func nilcheckelim(f *Func) {
	// A nil check is redundant if the same nil check was successful in a
	// dominating block. The efficacy of this pass depends heavily on the
	// efficacy of the cse pass.
	sdom := f.sdom()

	// TODO: Eliminate more nil checks.
	// We can recursively remove any chain of fixed offset calculations,
	// i.e. struct fields and array elements, even with non-constant
	// indices: x is non-nil iff x.a.b[i].c is.

	// walkState distinguishes the two kinds of entries on the explicit
	// DFS work stack below.
	type walkState int
	const (
		Work     walkState = iota // process nil checks and traverse to dominees
		ClearPtr                  // forget the fact that ptr is nil
	)

	type bp struct {
		block *Block // block, or nil in ClearPtr state
		ptr   *Value // if non-nil, ptr that is to be cleared in ClearPtr state
		op    walkState
	}

	work := make([]bp, 0, 256)
	work = append(work, bp{block: f.Entry})

	// map from value ID to bool indicating if value is known to be non-nil
	// in the current dominator path being walked. This slice is updated by
	// walkStates to maintain the known non-nil values.
	nonNilValues := make([]bool, f.NumValues())

	// make an initial pass identifying any non-nil values
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			// a value resulting from taking the address of a
			// value, or a value constructed from an offset of a
			// non-nil ptr (OpAddPtr) implies it is non-nil
			if v.Op == OpAddr || v.Op == OpAddPtr {
				nonNilValues[v.ID] = true
			}
		}
	}

	// Iterate to a fixed point: a phi is non-nil only if every argument
	// is, and marking one phi non-nil can make another phi's arguments
	// all non-nil, so repeat until no new facts are learned.
	for changed := true; changed; {
		changed = false
		for _, b := range f.Blocks {
			for _, v := range b.Values {
				// phis whose arguments are all non-nil
				// are non-nil
				if v.Op == OpPhi {
					argsNonNil := true
					for _, a := range v.Args {
						if !nonNilValues[a.ID] {
							argsNonNil = false
							break
						}
					}
					if argsNonNil {
						if !nonNilValues[v.ID] {
							changed = true
						}
						nonNilValues[v.ID] = true
					}
				}
			}
		}
	}

	// allocate auxiliary data structures for computing store order
	sset := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(sset)
	storeNumber := make([]int32, f.NumValues())

	// perform a depth first walk of the dominee tree
	// The work stack is LIFO: a ClearPtr entry pushed while processing a
	// block is popped only after all of that block's dominated subtree
	// (pushed afterwards, hence above it) has been processed, which is
	// what scopes each learned non-nil fact to its dominator subtree.
	for len(work) > 0 {
		node := work[len(work)-1]
		work = work[:len(work)-1]

		switch node.op {
		case Work:
			b := node.block

			// First, see if we're dominated by an explicit nil check.
			// (Succs[0] is the true edge of the BlockIf, so entering b via
			// it means the OpIsNonNil test succeeded.)
			if len(b.Preds) == 1 {
				p := b.Preds[0].b
				if p.Kind == BlockIf && p.Control.Op == OpIsNonNil && p.Succs[0].b == b {
					ptr := p.Control.Args[0]
					if !nonNilValues[ptr.ID] {
						nonNilValues[ptr.ID] = true
						work = append(work, bp{op: ClearPtr, ptr: ptr})
					}
				}
			}

			// Next, order values in the current block w.r.t. stores.
			b.Values = storeOrder(b.Values, sset, storeNumber)

			// Next, process values in the block.
			// i tracks the compacted length: values are copied down as we
			// go, and i is decremented to drop a removed OpNilCheck.
			i := 0
			for _, v := range b.Values {
				b.Values[i] = v
				i++
				switch v.Op {
				case OpIsNonNil:
					ptr := v.Args[0]
					if nonNilValues[ptr.ID] {
						// This is a redundant explicit nil check.
						v.reset(OpConstBool)
						v.AuxInt = 1 // true
					}
				case OpNilCheck:
					ptr := v.Args[0]
					if nonNilValues[ptr.ID] {
						// This is a redundant implicit nil check.
						// Logging in the style of the former compiler -- and omit line 1,
						// which is usually in generated code.
						if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
							f.Warnl(v.Pos, "removed nil check")
						}
						v.reset(OpUnknown)
						f.freeValue(v)
						i--
						continue
					}
					// Record the fact that we know ptr is non nil, and remember to
					// undo that information when this dominator subtree is done.
					nonNilValues[ptr.ID] = true
					work = append(work, bp{op: ClearPtr, ptr: ptr})
				}
			}
			// Nil out the dropped tail so the removed values can be GC'd,
			// then truncate to the compacted length.
			for j := i; j < len(b.Values); j++ {
				b.Values[j] = nil
			}
			b.Values = b.Values[:i]

			// Add all dominated blocks to the work list.
			for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
				work = append(work, bp{op: Work, block: w})
			}

		case ClearPtr:
			// Leaving the dominator subtree in which this fact was
			// established: forget it.
			nonNilValues[node.ptr.ID] = false
			continue
		}
	}
}

// All platforms are guaranteed to fault if we load/store to anything smaller than this address.
//
// This should agree with minLegalPointer in the runtime.
const minZeroPage = 4096

// nilcheckelim2 eliminates unnecessary nil checks.
// Runs after lowering and scheduling.
func nilcheckelim2(f *Func) {
	unnecessary := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(unnecessary)
	for _, b := range f.Blocks {
		// Walk the block backwards. Find instructions that will fault if their
		// input pointer is nil. Remove nil checks on those pointers, as the
		// faulting instruction effectively does the nil check for free.
		unnecessary.clear()
		// Optimization: keep track of removed nilcheck with smallest index
		firstToRemove := len(b.Values)
		for i := len(b.Values) - 1; i >= 0; i-- {
			v := b.Values[i]
			// A nil-check op whose pointer is later (in program order)
			// dereferenced by a faulting op is redundant: the fault
			// happens at the same point the check would have.
			if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) {
				if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
					f.Warnl(v.Pos, "removed nil check")
				}
				v.reset(OpUnknown)
				firstToRemove = i
				continue
			}
			if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
				if v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive {
					// These ops don't really change memory.
					continue
				}
				// This op changes memory. Any faulting instruction after v that
				// we've recorded in the unnecessary map is now obsolete.
				unnecessary.clear()
			}

			// Find any pointers that this op is guaranteed to fault on if nil.
			// (Stack-allocated scratch array avoids a heap allocation per value.)
			var ptrstore [2]*Value
			ptrs := ptrstore[:0]
			if opcodeTable[v.Op].faultOnNilArg0 {
				ptrs = append(ptrs, v.Args[0])
			}
			if opcodeTable[v.Op].faultOnNilArg1 {
				ptrs = append(ptrs, v.Args[1])
			}
			for _, ptr := range ptrs {
				// Check to make sure the offset is small.
				// A large offset from a nil pointer might land in a mapped
				// page and not fault; only offsets below minZeroPage are
				// guaranteed to fault (see the constant's comment).
				switch opcodeTable[v.Op].auxType {
				case auxSymOff:
					if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage {
						continue
					}
				case auxSymValAndOff:
					off := ValAndOff(v.AuxInt).Off()
					if v.Aux != nil || off < 0 || off >= minZeroPage {
						continue
					}
				case auxInt32:
					// Mips uses this auxType for atomic add constant. It does not affect the effective address.
				case auxInt64:
					// ARM uses this auxType for duffcopy/duffzero/alignment info.
					// It does not affect the effective address.
				case auxNone:
					// offset is zero.
				default:
					v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType))
				}
				// This instruction is guaranteed to fault if ptr is nil.
				// Any previous nil check op is unnecessary.
				unnecessary.add(ptr.ID)
			}
		}
		// Remove values we've clobbered with OpUnknown.
		// firstToRemove lets us skip the untouched prefix of the slice.
		i := firstToRemove
		for j := i; j < len(b.Values); j++ {
			v := b.Values[j]
			if v.Op != OpUnknown {
				b.Values[i] = v
				i++
			}
		}
		// Nil out the dropped tail so removed values can be GC'd.
		for j := i; j < len(b.Values); j++ {
			b.Values[j] = nil
		}
		b.Values = b.Values[:i]

		// TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find
		// more unnecessary nil checks. Would fix test/nilptr3_ssa.go:157.
	}
}