// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

// nilcheckelim eliminates unnecessary nil checks.
// runs on machine-independent code.
func nilcheckelim(f *Func) {
	// A nil check is redundant if the same nil check was successful in a
	// dominating block. The efficacy of this pass depends heavily on the
	// efficacy of the cse pass.
	sdom := f.sdom()

	// TODO: Eliminate more nil checks.
	// We can recursively remove any chain of fixed offset calculations,
	// i.e. struct fields and array elements, even with non-constant
	// indices: x is non-nil iff x.a.b[i].c is.

	// The dominator tree is walked with an explicit stack rather than
	// recursion. Each stack entry is tagged with a walkState:
	type walkState int
	const (
		Work     walkState = iota // process nil checks and traverse to dominees
		ClearPtr                  // forget the fact that ptr is nil
	)

	// bp is one entry of the explicit DFS stack.
	type bp struct {
		block *Block // block, or nil in ClearPtr state
		ptr   *Value // if non-nil, ptr that is to be cleared in ClearPtr state
		op    walkState
	}

	work := make([]bp, 0, 256)
	work = append(work, bp{block: f.Entry})

	// map from value ID to bool indicating if value is known to be non-nil
	// in the current dominator path being walked. This slice is updated by
	// walkStates to maintain the known non-nil values.
	// ClearPtr entries pushed below are popped only after the entire
	// dominator subtree above them has been processed, which undoes a
	// recorded fact exactly when we leave the region it dominates.
	nonNilValues := make([]bool, f.NumValues())

	// make an initial pass identifying any non-nil values
	for _, b := range f.Blocks {
		// a value resulting from taking the address of a
		// value, or a value constructed from an offset of a
		// non-nil ptr (OpAddPtr) implies it is non-nil
		for _, v := range b.Values {
			if v.Op == OpAddr || v.Op == OpAddPtr {
				nonNilValues[v.ID] = true
			} else if v.Op == OpPhi {
				// phis whose arguments are all non-nil
				// are non-nil
				argsNonNil := true
				for _, a := range v.Args {
					if !nonNilValues[a.ID] {
						argsNonNil = false
					}
				}
				if argsNonNil {
					nonNilValues[v.ID] = true
				}
			}
		}
	}

	// perform a depth first walk of the dominee tree
	for len(work) > 0 {
		node := work[len(work)-1]
		work = work[:len(work)-1]

		switch node.op {
		case Work:
			b := node.block

			// First, see if we're dominated by an explicit nil check.
			// If our single predecessor branched on "ptr != nil" and we
			// are its 0th (taken-when-true) successor, ptr is non-nil
			// everywhere this block dominates.
			if len(b.Preds) == 1 {
				p := b.Preds[0].b
				if p.Kind == BlockIf && p.Control.Op == OpIsNonNil && p.Succs[0].b == b {
					ptr := p.Control.Args[0]
					if !nonNilValues[ptr.ID] {
						nonNilValues[ptr.ID] = true
						// Schedule the undo for when this subtree is done.
						work = append(work, bp{op: ClearPtr, ptr: ptr})
					}
				}
			}

			// Next, eliminate any redundant nil checks in this block.
			// b.Values is compacted in place: i is the write cursor, and
			// removed OpNilCheck values decrement it so later values
			// overwrite the freed slot.
			i := 0
			for _, v := range b.Values {
				b.Values[i] = v
				i++
				switch v.Op {
				case OpIsNonNil:
					ptr := v.Args[0]
					if nonNilValues[ptr.ID] {
						// This is a redundant explicit nil check.
						// Fold the comparison to a constant true; the
						// value stays in the block so uses remain valid.
						v.reset(OpConstBool)
						v.AuxInt = 1 // true
					}
				case OpNilCheck:
					ptr := v.Args[0]
					if nonNilValues[ptr.ID] {
						// This is a redundant implicit nil check.
						// Logging in the style of the former compiler -- and omit line 1,
						// which is usually in generated code.
						if f.Config.Debug_checknil() && v.Pos.Line() > 1 {
							f.Config.Warnl(v.Pos, "removed nil check")
						}
						v.reset(OpUnknown)
						// TODO: f.freeValue(v)
						i--
						continue
					}
				}
			}
			// Nil out the tail so the dropped values can be collected,
			// then trim the slice to the surviving prefix.
			for j := i; j < len(b.Values); j++ {
				b.Values[j] = nil
			}
			b.Values = b.Values[:i]

			// Finally, find redundant nil checks for subsequent blocks.
			// Note that we can't add these until the loop above is done, as the
			// values in the block are not ordered in any way when this pass runs.
			// This was the cause of issue #18725.
			for _, v := range b.Values {
				if v.Op != OpNilCheck {
					continue
				}
				ptr := v.Args[0]
				// Record the fact that we know ptr is non nil, and remember to
				// undo that information when this dominator subtree is done.
				nonNilValues[ptr.ID] = true
				work = append(work, bp{op: ClearPtr, ptr: ptr})
			}

			// Add all dominated blocks to the work list.
			for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
				work = append(work, bp{op: Work, block: w})
			}

		case ClearPtr:
			// Leaving the subtree in which ptr was proven non-nil:
			// forget that fact.
			nonNilValues[node.ptr.ID] = false
			continue
		}
	}
}

// All platforms are guaranteed to fault if we load/store to anything smaller than this address.
//
// This should agree with minLegalPointer in the runtime.
const minZeroPage = 4096

// nilcheckelim2 eliminates unnecessary nil checks.
// Runs after lowering and scheduling.
func nilcheckelim2(f *Func) {
	// unnecessary holds the IDs of pointers for which a later (in program
	// order) instruction in the current block is guaranteed to fault if
	// the pointer is nil, making an earlier explicit check redundant.
	unnecessary := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(unnecessary)
	for _, b := range f.Blocks {
		// Walk the block backwards. Find instructions that will fault if their
		// input pointer is nil. Remove nil checks on those pointers, as the
		// faulting instruction effectively does the nil check for free.
		unnecessary.clear()
		for i := len(b.Values) - 1; i >= 0; i-- {
			v := b.Values[i]
			if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) {
				// A later instruction faults on this pointer anyway;
				// drop the explicit check (same logging convention as
				// nilcheckelim: skip line 1, usually generated code).
				if f.Config.Debug_checknil() && v.Pos.Line() > 1 {
					f.Config.Warnl(v.Pos, "removed nil check")
				}
				v.reset(OpUnknown)
				continue
			}
			if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
				if v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive {
					// These ops don't really change memory.
					continue
				}
				// This op changes memory. Any faulting instruction after v that
				// we've recorded in the unnecessary map is now obsolete.
				unnecessary.clear()
			}

			// Find any pointers that this op is guaranteed to fault on if nil.
			var ptrstore [2]*Value
			ptrs := ptrstore[:0]
			if opcodeTable[v.Op].faultOnNilArg0 {
				ptrs = append(ptrs, v.Args[0])
			}
			if opcodeTable[v.Op].faultOnNilArg1 {
				ptrs = append(ptrs, v.Args[1])
			}
			for _, ptr := range ptrs {
				// Check to make sure the offset is small. A large offset
				// added to a nil pointer might land on a mapped page and
				// not fault, so only offsets below minZeroPage qualify.
				switch opcodeTable[v.Op].auxType {
				case auxSymOff:
					if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage {
						continue
					}
				case auxSymValAndOff:
					off := ValAndOff(v.AuxInt).Off()
					if v.Aux != nil || off < 0 || off >= minZeroPage {
						continue
					}
				case auxInt32:
					// Mips uses this auxType for atomic add constant. It does not affect the effective address.
				case auxInt64:
					// ARM uses this auxType for duffcopy/duffzero/alignment info.
					// It does not affect the effective address.
				case auxNone:
					// offset is zero.
				default:
					v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType))
				}
				// This instruction is guaranteed to fault if ptr is nil.
				// Any previous nil check op is unnecessary.
				unnecessary.add(ptr.ID)
			}
		}
		// Remove values we've clobbered with OpUnknown.
		// In-place compaction: keep only non-OpUnknown values, nil out
		// the tail for GC, then trim the slice.
		i := 0
		for _, v := range b.Values {
			if v.Op != OpUnknown {
				b.Values[i] = v
				i++
			}
		}
		for j := i; j < len(b.Values); j++ {
			b.Values[j] = nil
		}
		b.Values = b.Values[:i]

		// TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find
		// more unnecessary nil checks. Would fix test/nilptr3_ssa.go:157.
	}
}