github.com/aclements/go-misc@v0.0.0-20240129233631-2f6ede80790c/rtcheck/handlers.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"go/constant"
	"go/token"
	"log"

	"golang.org/x/tools/go/ssa"
)

// TODO: Stack barrier locks, semaphores, etc.

// A callHandler implements special handling of a function call. It
// should append the updated PathState to newps and return the
// resulting slice.
type callHandler func(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState

// callHandlers maps from function names (the result of
// ssa.Function.String()) to handlers for special functions.
//
// TODO: Several of these are not nosplit and hence should model the
// implicit morestack.
var callHandlers map[string]callHandler

// trackArgs is a set of function names (ssa.Function.String()) to
// track the argument values of.
var trackArgs = map[string]bool{
	// copystack's locking behavior is significantly affected by
	// the "sync" argument.
	"runtime.copystack": true,

	// TODO: chan.go:recv takes an unlock closure. Can we track
	// that? Pointer analysis says the unlock function could be
	// selunlock, which has a path that unlocks nothing.
}

func init() {
	// Go's initialization order rule doesn't distinguish between
	// function pointers and function calls, so we have to
	// initialize callHandlers outside of the initialization order.
	callHandlers = map[string]callHandler{
		"runtime.lock":   handleRuntimeLock,
		"runtime.unlock": handleRuntimeUnlock,

		"runtime.casgstatus":          handleRuntimeCasgstatus,
		"runtime.castogscanstatus":    handleRuntimeCastogscanstatus,
		"runtime.casfrom_Gscanstatus": handleRuntimeCasfrom_Gscanstatus,

		"runtime.getg":                    handleRuntimeGetg,
		"runtime.acquirem":                handleRuntimeAcquirem,
		"runtime.releasem":                handleRuntimeReleasem,
		"runtime.rtcheck۰presystemstack":  handleRuntimePresystemstack,
		"runtime.rtcheck۰postsystemstack": handleRuntimePostsystemstack,

		"runtime.morestack": handleRuntimeMorestack,

		// restartg does a conditional unlock of _Gscan, but it's hard
		// to track that condition. In practice, it always does the
		// unlock, so handle it just like casfrom_Gscanstatus.
		//
		// TODO: This function is silly. We should probably remove it
		// from the runtime.
		"runtime.restartg": handleRuntimeCasfrom_Gscanstatus,
	}
}

func handleRuntimeLock(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	lock, err := s.lca.Get(instr.(*ssa.Call).Call.Args[0])
	if err != nil {
		s.warnl(instr.Pos(), "%s", err)
	} else {
		newls := NewLockSet().Plus(lock, s.stack)
		s.lockOrder.Add(ps.lockSet, newls, s.stack)
		ls2 := ps.lockSet.Plus(lock, s.stack)
		// If we self-deadlocked, terminate this path.
		//
		// TODO: This is only sound if we know it's the same lock
		// *instance*.
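		//
		// The check below relies on Plus leaving the lock set
		// unchanged when the lock class is already present: an
		// equal result means this path is acquiring a lock class
		// it already holds.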
		if ps.lockSet == ls2 {
			s.warnp(instr.Pos(), "possible self-deadlock %s %s; trimming path", ps.lockSet, lock)
			return newps
		}
		ps.lockSet = ls2
	}

	// m.locks++
	mlocks := ps.vs.GetHeap(s.heap.curM_locks).(DynConst)
	nlocks, _ := constant.Int64Val(mlocks.c)
	const maxLocks = 16
	if nlocks >= maxLocks {
		s.warnp(instr.Pos(), "%d locks held; trimming path", nlocks)
		return newps
	}
	ps.vs = ps.vs.ExtendHeap(s.heap.curM_locks, mlocks.BinOp(token.ADD, DynConst{constant.MakeInt64(1)}))
	return append(newps, ps)
}

func handleRuntimeUnlock(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	held := false
	lock, err := s.lca.Get(instr.(*ssa.Call).Call.Args[0])
	if err != nil {
		s.warnl(instr.Pos(), "%s", err)
	} else {
		held = ps.lockSet.Contains(lock)
		ps.lockSet = ps.lockSet.Minus(lock)
		if !held {
			// TODO: Perhaps warn more stringently if this is a
			// single instance lock class, though even then we
			// could be confused by control flow.
			s.warnl(instr.Pos(), "possible unlock of unlocked lock")
		}
	}

	// m.locks-- if the lock was held. We only do this conditionally
	// because sometimes our handling of correlated control flow
	// leads to *three* paths: both lock and unlock, neither lock
	// nor unlock, and just unlock.
	if held {
		mlocks := ps.vs.GetHeap(s.heap.curM_locks).(DynConst)
		if constant.Compare(mlocks.c, token.LEQ, constant.MakeInt64(0)) {
			// Terminate path.
			s.warnp(instr.Pos(), "unlock with m.locks <= 0; trimming path")
			return newps
		}
		ps.vs = ps.vs.ExtendHeap(s.heap.curM_locks, mlocks.BinOp(token.SUB, DynConst{constant.MakeInt64(1)}))
	}
	return append(newps, ps)
}

func handleRuntimeCasgstatus(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	// Equivalent to acquiring and releasing _Gscan.
	gscan := NewLockSet().Plus(s.gscanLock, s.stack)
	s.lockOrder.Add(ps.lockSet, gscan, s.stack)
	return append(newps, ps)
}

func handleRuntimeCastogscanstatus(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	// This is a conditional acquisition of _Gscan. _Gscan is
	// acquired on the true branch and not acquired on the false
	// branch. Either way it participates in the lock order.
	gscan := NewLockSet().Plus(s.gscanLock, s.stack)
	s.lockOrder.Add(ps.lockSet, gscan, s.stack)

	psT, psF := ps, ps

	psT.lockSet = psT.lockSet.Plus(s.gscanLock, s.stack)
	psT.vs = psT.vs.Extend(instr.(ssa.Value), DynConst{constant.MakeBool(true)})

	psF.vs = psF.vs.Extend(instr.(ssa.Value), DynConst{constant.MakeBool(false)})

	return append(newps, psT, psF)
}

func handleRuntimeCasfrom_Gscanstatus(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	// Unlock of _Gscan.
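	//
	// Unlike handleRuntimeUnlock, there's no held check and no
	// m.locks adjustment here: _Gscan is modeled purely as a lock
	// class for ordering purposes, not as a runtime mutex.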
	ps.lockSet = ps.lockSet.Minus(s.gscanLock)
	return append(newps, ps)
}

func handleRuntimeGetg(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	val := ps.vs.GetHeap(s.heap.curG)
	if val == nil {
		log.Fatal("failed to determine current G")
	}
	ps.vs = ps.vs.Extend(instr.(ssa.Value), val)
	return append(newps, ps)
}

func handleRuntimeAcquirem(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	ps.vs = ps.vs.Extend(instr.(ssa.Value), DynHeapPtr{s.heap.curM})
	// m.locks++
	mlocks := ps.vs.GetHeap(s.heap.curM_locks).(DynConst)
	ps.vs = ps.vs.ExtendHeap(s.heap.curM_locks, mlocks.BinOp(token.ADD, DynConst{constant.MakeInt64(1)}))
	return append(newps, ps)
}

func handleRuntimeReleasem(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	// releasem does an m.locks--, but m comes in as an argument,
	// so we can't tell that it's just curM. If we were to track
	// argument and return values, we might be able to get this
	// and acquirem right automatically.
	//
	// m.locks--
	mlocks := ps.vs.GetHeap(s.heap.curM_locks).(DynConst)
	ps.vs = ps.vs.ExtendHeap(s.heap.curM_locks, mlocks.BinOp(token.SUB, DynConst{constant.MakeInt64(1)}))
	return append(newps, ps)
}

func handleRuntimePresystemstack(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	// Get the current G.
	curG := ps.vs.GetHeap(s.heap.curG)
	if curG == nil {
		log.Fatal("failed to determine current G")
	}
	// Set the current G to g0. This is a no-op if we're already
	// on the system stack.
	ps.vs = ps.vs.ExtendHeap(s.heap.curG, DynHeapPtr{s.heap.g0})
	// Return the original G.
	ps.vs = ps.vs.Extend(instr.(ssa.Value), curG)
	return append(newps, ps)
}

func handleRuntimePostsystemstack(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	// Return to the g returned by presystemstack.
	origG := ps.vs.Get(instr.(*ssa.Call).Call.Args[0])
	if origG == nil {
		log.Fatal("failed to restore G returned by presystemstack")
	}
	ps.vs = ps.vs.ExtendHeap(s.heap.curG, origG)
	return append(newps, ps)
}

func handleRuntimeMorestack(s *state, ps PathState, instr ssa.Instruction, newps []PathState) []PathState {
	// Get the current G.
	curG := ps.vs.GetHeap(s.heap.curG)
	if curG == nil {
		log.Fatal("failed to determine current G")
	}
	// If we're on the system stack, we either have room or we
	// panic, so just return from morestack.
	if curG.(DynHeapPtr).elem == s.heap.g0 {
		return append(newps, ps)
	}

	// Otherwise, we may or may not call newstack. Take both
	// paths. This is important because newstack doesn't
	// technically "return", so we're going to lose that execution
	// path.
	newps = append(newps, ps)

	// Call newstack on the system stack.
	//
	// TODO: This duplicates some of doCall. Can I make the
	// walkFunction API nicer so this is nicer?
	newstack := instr.Parent().Prog.ImportedPackage("runtime").Func("newstack")
	ps = PathState{
		lockSet: ps.lockSet,
		vs:      ps.vs.ExtendHeap(s.heap.curG, DynHeapPtr{s.heap.g0}).LimitToHeap(),
	}
	s.walkFunction(newstack, ps).ForEach(func(ps2 PathState) {
		ps.lockSet = ps2.lockSet
		ps.vs.heap = ps2.vs.heap
		// Leave system stack.
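		//
		// Restore the original G so the post-newstack
		// continuation runs on the user stack, carrying
		// newstack's lock set and heap effects.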
		ps.vs = ps.vs.ExtendHeap(s.heap.curG, curG)
		newps = append(newps, ps)
	})
	return newps
}