github.com/MerlinKodo/gvisor@v0.0.0-20231110090155-957f62ecf90e/pkg/sentry/kernel/task_futex.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

import (
	"github.com/MerlinKodo/gvisor/pkg/abi/linux"
	"github.com/MerlinKodo/gvisor/pkg/hostarch"
	"github.com/MerlinKodo/gvisor/pkg/marshal/primitive"
	"github.com/MerlinKodo/gvisor/pkg/sentry/kernel/futex"
	"github.com/MerlinKodo/gvisor/pkg/usermem"
)

// Futex returns t's futex manager.
//
// Preconditions: The caller must be running on the task goroutine, or t.mu
// must be locked.
func (t *Task) Futex() *futex.Manager {
	return t.image.fu
}

// SwapUint32 implements futex.Target.SwapUint32.
func (t *Task) SwapUint32(addr hostarch.Addr, new uint32) (uint32, error) {
	return t.MemoryManager().SwapUint32(t, addr, new, usermem.IOOpts{
		AddressSpaceActive: true,
	})
}

// CompareAndSwapUint32 implements futex.Target.CompareAndSwapUint32.
func (t *Task) CompareAndSwapUint32(addr hostarch.Addr, old, new uint32) (uint32, error) {
	return t.MemoryManager().CompareAndSwapUint32(t, addr, old, new, usermem.IOOpts{
		AddressSpaceActive: true,
	})
}

// LoadUint32 implements futex.Target.LoadUint32.
func (t *Task) LoadUint32(addr hostarch.Addr) (uint32, error) {
	return t.MemoryManager().LoadUint32(t, addr, usermem.IOOpts{
		AddressSpaceActive: true,
	})
}

// GetSharedKey implements futex.Target.GetSharedKey.
func (t *Task) GetSharedKey(addr hostarch.Addr) (futex.Key, error) {
	return t.MemoryManager().GetSharedFutexKey(t, addr)
}

// GetRobustList returns the address of the robust futex list for the task.
func (t *Task) GetRobustList() hostarch.Addr {
	t.mu.Lock()
	addr := t.robustList
	t.mu.Unlock()
	return addr
}

// SetRobustList sets the robust futex list for the task.
func (t *Task) SetRobustList(addr hostarch.Addr) {
	t.mu.Lock()
	t.robustList = addr
	t.mu.Unlock()
}

// exitRobustList walks the robust futex list, marking locks dead and notifying
// wakers. It corresponds to Linux's exit_robust_list(). Following Linux,
// errors are silently ignored.
func (t *Task) exitRobustList() {
	t.mu.Lock()
	addr := t.robustList
	t.robustList = 0
	t.mu.Unlock()

	if addr == 0 {
		return
	}

	var rl linux.RobustListHead
	if _, err := rl.CopyIn(t, hostarch.Addr(addr)); err != nil {
		return
	}

	next := primitive.Uint64(rl.List)
	done := 0
	var pendingLockAddr hostarch.Addr
	if rl.ListOpPending != 0 {
		pendingLockAddr = hostarch.Addr(rl.ListOpPending + rl.FutexOffset)
	}

	// Wake up normal elements.
	for hostarch.Addr(next) != addr {
		// We traverse to the next element of the list before we
		// actually wake anything. This prevents the race where waking
		// this futex causes a modification of the list.
		thisLockAddr := hostarch.Addr(uint64(next) + rl.FutexOffset)

		// Try to decode the next element in the list before waking the
		// current futex. But don't check the error until after we've
		// woken the current futex. Linux does it in this order too.
		_, nextErr := next.CopyIn(t, hostarch.Addr(next))

		// Wake up the current futex if it's not pending.
		if thisLockAddr != pendingLockAddr {
			t.wakeRobustListOne(thisLockAddr)
		}

		// If there was an error copying the next futex, we must bail.
		if nextErr != nil {
			break
		}

		// This is a user structure, so it could be a massive list, or
		// even contain a loop if they are trying to mess with us. We
		// cap traversal to prevent that.
		done++
		if done >= linux.ROBUST_LIST_LIMIT {
			break
		}
	}

	// Is there a pending entry to wake?
	if pendingLockAddr != 0 {
		t.wakeRobustListOne(pendingLockAddr)
	}
}

// wakeRobustListOne wakes a single futex from the robust list.
func (t *Task) wakeRobustListOne(addr hostarch.Addr) {
	// Bit 0 in the address signals a PI futex.
	pi := addr&1 == 1
	addr = addr &^ 1

	// Load the futex.
	f, err := t.LoadUint32(addr)
	if err != nil {
		// Can't read this single value? Ignore the problem.
		// We can wake the other futexes in the list.
		return
	}

	tid := uint32(t.ThreadID())
	for {
		// Is this held by someone else?
		if f&linux.FUTEX_TID_MASK != tid {
			return
		}

		// This thread is dying and it's holding this futex. We need to
		// set the owner died bit and wake up any waiters.
		newF := (f & linux.FUTEX_WAITERS) | linux.FUTEX_OWNER_DIED
		if curF, err := t.CompareAndSwapUint32(addr, f, newF); err != nil {
			return
		} else if curF != f {
			// Futex changed out from under us. Try again...
			f = curF
			continue
		}

		// Wake waiters if there are any.
		if f&linux.FUTEX_WAITERS != 0 {
			private := f&linux.FUTEX_PRIVATE_FLAG != 0
			if pi {
				t.Futex().UnlockPI(t, addr, tid, private)
				return
			}
			t.Futex().Wake(t, addr, private, linux.FUTEX_BITSET_MATCH_ANY, 1)
		}

		// Done.
		return
	}
}
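
Below is a minimal standalone sketch (not part of the file above) of the futex-word update that wakeRobustListOne performs when a dying thread still owns a robust lock: the TID bits are cleared, FUTEX_OWNER_DIED is set, and FUTEX_WAITERS is preserved so waiters can still be woken. The constant values are assumed to mirror Linux's include/uapi/linux/futex.h (the same values behind linux.FUTEX_WAITERS, linux.FUTEX_OWNER_DIED, and linux.FUTEX_TID_MASK used above); the helper name markOwnerDied is hypothetical.

package main

import "fmt"

// Assumed values mirroring include/uapi/linux/futex.h.
const (
	futexWaiters   = 0x80000000 // FUTEX_WAITERS
	futexOwnerDied = 0x40000000 // FUTEX_OWNER_DIED
	futexTIDMask   = 0x3fffffff // FUTEX_TID_MASK
)

// markOwnerDied (hypothetical helper) computes the same newF value as
// wakeRobustListOne: if the futex word f is owned by tid, clear the TID bits,
// set FUTEX_OWNER_DIED, and keep FUTEX_WAITERS so a waiter can be woken.
func markOwnerDied(f, tid uint32) (uint32, bool) {
	if f&futexTIDMask != tid {
		return f, false // not owned by the dying thread; leave it alone
	}
	return (f & futexWaiters) | futexOwnerDied, true
}

func main() {
	const tid = 42
	// Uncontended lock, contended lock, and a lock owned by another thread.
	for _, f := range []uint32{tid, tid | futexWaiters, 7} {
		if newF, ok := markOwnerDied(f, tid); ok {
			fmt.Printf("%#08x -> %#08x\n", f, newF)
		} else {
			fmt.Printf("%#08x unchanged (not owned by tid %d)\n", f, tid)
		}
	}
}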