github.com/go-darwin/sys@v0.0.0-20220510002607-68fd01f054ca/testdata/testprog/lockosthread.go

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"os"
	"runtime"
	"sync"
	"time"
)

var mainTID int

func init() {
	registerInit("LockOSThreadMain", func() {
		// init is guaranteed to run on the main thread.
		mainTID = gettid()
	})
	register("LockOSThreadMain", LockOSThreadMain)

	registerInit("LockOSThreadAlt", func() {
		// Lock the OS thread now so main runs on the main thread.
		runtime.LockOSThread()
	})
	register("LockOSThreadAlt", LockOSThreadAlt)

	registerInit("LockOSThreadAvoidsStatePropagation", func() {
		// Lock the OS thread now so main runs on the main thread.
		runtime.LockOSThread()
	})
	register("LockOSThreadAvoidsStatePropagation", LockOSThreadAvoidsStatePropagation)
	register("LockOSThreadTemplateThreadRace", LockOSThreadTemplateThreadRace)
}

func LockOSThreadMain() {
	// gettid only works on Linux, so on other platforms this just
	// checks that the runtime doesn't do anything terrible.

	// This requires GOMAXPROCS=1 from the beginning to reliably
	// start a goroutine on the main thread.
	if runtime.GOMAXPROCS(-1) != 1 {
		println("requires GOMAXPROCS=1")
		os.Exit(1)
	}

	ready := make(chan bool, 1)
	go func() {
		// Because GOMAXPROCS=1, this *should* be on the main
		// thread. Stay there.
		runtime.LockOSThread()
		if mainTID != 0 && gettid() != mainTID {
			println("failed to start goroutine on main thread")
			os.Exit(1)
		}
		// Exit with the thread locked, which should exit the
		// main thread.
		ready <- true
	}()
	<-ready
	time.Sleep(1 * time.Millisecond)
	// Check that this goroutine is still running on a different
	// thread.
	if mainTID != 0 && gettid() == mainTID {
		println("goroutine migrated to locked thread")
		os.Exit(1)
	}
	println("OK")
}

func LockOSThreadAlt() {
	// This is running locked to the main OS thread.

	var subTID int
	ready := make(chan bool, 1)
	go func() {
		// This goroutine must be running on a new thread.
		runtime.LockOSThread()
		subTID = gettid()
		ready <- true
		// Exit with the thread locked.
	}()
	<-ready
	runtime.UnlockOSThread()
	for i := 0; i < 100; i++ {
		time.Sleep(1 * time.Millisecond)
		// Check that this goroutine is running on a different thread.
		if subTID != 0 && gettid() == subTID {
			println("locked thread reused")
			os.Exit(1)
		}
		exists, supported := tidExists(subTID)
		if !supported || !exists {
			goto ok
		}
	}
	println("sub thread", subTID, "still running")
	return
ok:
	println("OK")
}
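
// gettid and tidExists are not defined in this file; they live in
// per-OS source files elsewhere in this package, with non-Linux stubs
// that report "unsupported". As a rough sketch of what the Linux
// variants could look like (an illustration assuming the standard
// syscall package, not this package's actual code):
//
//	func gettid() int {
//		return syscall.Gettid()
//	}
//
//	func tidExists(tid int) (exists, supported bool) {
//		// A live (or zombie) thread keeps an entry under /proc/self/task.
//		_, err := os.Stat(fmt.Sprintf("/proc/self/task/%d", tid))
//		if os.IsNotExist(err) {
//			return false, true
//		}
//		return err == nil, true
//	}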

func LockOSThreadAvoidsStatePropagation() {
	// This test is similar to LockOSThreadAlt in that it will detect if a thread
	// which should have died is still running. However, rather than do this with
	// thread IDs, it does this by unsharing state on that thread. This way, it
	// also detects whether new threads were cloned from the dead thread, and not
	// from a clean thread. Cloning from a locked thread is undesirable since
	// cloned threads will inherit potentially unwanted OS state.
	//
	// unshareFs, getcwd, and chdir("/tmp") are only guaranteed to work on
	// Linux, so on other platforms this just checks that the runtime doesn't
	// do anything terrible.
	//
	// This is running locked to the main OS thread.

	// GOMAXPROCS=1 makes this fail much more reliably if a tainted thread is
	// cloned from.
	if runtime.GOMAXPROCS(-1) != 1 {
		println("requires GOMAXPROCS=1")
		os.Exit(1)
	}

	if err := chdir("/"); err != nil {
		println("failed to chdir:", err.Error())
		os.Exit(1)
	}
	// On systems other than Linux, cwd == "".
	cwd, err := getcwd()
	if err != nil {
		println("failed to get cwd:", err.Error())
		os.Exit(1)
	}
	if cwd != "" && cwd != "/" {
		println("unexpected cwd", cwd, " wanted /")
		os.Exit(1)
	}

	ready := make(chan bool, 1)
	go func() {
		// This goroutine must be running on a new thread.
		runtime.LockOSThread()

		// Unshare details about the FS, like the CWD, with
		// the rest of the process on this thread.
		// On systems other than Linux, this is a no-op.
		if err := unshareFs(); err != nil {
			if err == errNotPermitted {
				println("unshare not permitted")
				os.Exit(0)
			}
			println("failed to unshare fs:", err.Error())
			os.Exit(1)
		}
		// Chdir to somewhere else on this thread.
		// On systems other than Linux, this is a no-op.
		if err := chdir("/tmp"); err != nil {
			println("failed to chdir:", err.Error())
			os.Exit(1)
		}

		// The state on this thread is now considered "tainted", but it
		// should no longer be observable in any other context.

		ready <- true
		// Exit with the thread locked.
	}()
	<-ready

	// Spawn yet another goroutine and lock it. Since GOMAXPROCS=1, if
	// for some reason state from the (hopefully dead) locked thread above
	// propagated into a newly created thread (via clone), or that thread
	// is actually being re-used, then we should get scheduled on such a
	// thread with high likelihood.
	done := make(chan bool)
	go func() {
		runtime.LockOSThread()

		// Get the CWD and check if this is the same as the main thread's
		// CWD. Every thread should share the same CWD.
		// On systems other than Linux, wd == "".
		wd, err := getcwd()
		if err != nil {
			println("failed to get cwd:", err.Error())
			os.Exit(1)
		}
		if wd != cwd {
			println("bad state from old thread propagated after it should have died")
			os.Exit(1)
		}
		<-done

		runtime.UnlockOSThread()
	}()
	done <- true
	runtime.UnlockOSThread()
	println("OK")
}
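
// unshareFs, chdir, getcwd, and errNotPermitted are likewise supplied by
// per-OS files, with non-Linux versions that are no-ops. A minimal sketch
// of plausible Linux implementations (an assumption for illustration, not
// this package's actual code):
//
//	var errNotPermitted = errors.New("operation not permitted")
//
//	func unshareFs() error {
//		// CLONE_FS gives this thread its own FS state (CWD, umask),
//		// so changes made here stop being visible to other threads.
//		err := syscall.Unshare(syscall.CLONE_FS)
//		if err == syscall.EPERM {
//			return errNotPermitted
//		}
//		return err
//	}
//
//	func chdir(path string) error { return syscall.Chdir(path) }
//
//	func getcwd() (string, error) { return syscall.Getwd() }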

func LockOSThreadTemplateThreadRace() {
	// This test attempts to reproduce the race described in
	// golang.org/issue/38931. To do so, we must have a stop-the-world
	// (achieved via ReadMemStats) racing with two LockOSThread calls.
	//
	// While this test attempts to line up the timing, it is only expected
	// to fail (and thus hang) around 2% of the time if the race is
	// present.

	// Ensure enough Ps to actually run everything in parallel. Though on
	// <4 core machines, we are still at the whim of the kernel scheduler.
	runtime.GOMAXPROCS(4)

	go func() {
		// Stop the world; race with LockOSThread below.
		var m runtime.MemStats
		for {
			runtime.ReadMemStats(&m)
		}
	}()

	// Try to synchronize both LockOSThreads.
	start := time.Now().Add(10 * time.Millisecond)

	var wg sync.WaitGroup
	wg.Add(2)

	for i := 0; i < 2; i++ {
		go func() {
			for time.Now().Before(start) {
			}

			// Add work to the local runq to trigger early startm
			// in handoffp.
			go func() {}()

			runtime.LockOSThread()
			runtime.Gosched() // add a preemption point.
			wg.Done()
		}()
	}

	wg.Wait()
	// If both LockOSThreads completed then we did not hit the race.
	println("OK")
}
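
// register and registerInit come from the testprog harness (main.go in
// this directory): register records a named entry point that runs when
// the binary is invoked with that name as its argument, and registerInit
// runs its hook during package init, i.e. while still on the main thread.
// A sketch, assuming the harness follows the Go runtime's testprog
// convention (not a verbatim copy of this package's code):
//
//	var cmds = map[string]func(){}
//
//	func register(name string, f func()) {
//		if cmds[name] != nil {
//			panic("duplicate registration: " + name)
//		}
//		cmds[name] = f
//	}
//
//	func registerInit(name string, f func()) {
//		// Run the init hook only for the requested entry point.
//		if len(os.Args) >= 2 && os.Args[1] == name {
//			f()
//		}
//	}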