// +build linux,cgo

// Package shm implements SHM-backed (shared memory) POSIX semaphore locking
// for libpod, via a thin cgo wrapper around shm_lock.h.
package shm

// #cgo LDFLAGS: -lrt -lpthread
// #cgo CFLAGS: -Wall -Werror
// #include <stdlib.h>
// #include "shm_lock.h"
// const uint32_t bitmap_size_c = BITMAP_SIZE;
import "C"

import (
	"runtime"
	"syscall"
	"unsafe"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	// BitmapSize is the size of the bitmap used when managing SHM locks.
	// An SHM lock manager's max locks will be rounded up to a multiple of
	// this number.
	BitmapSize = uint32(C.bitmap_size_c)
)

// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
// segment.
type SHMLocks struct { // nolint
	lockStruct *C.shm_struct_t // C-side handle to the mapped SHM segment
	maxLocks   uint32          // number of semaphores in the segment
	valid      bool            // set false by Close(); guards all operations
}

// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
// semaphores, and returns a struct that can be used to operate on those locks.
// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
// size used by the underlying implementation.
40 func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) { 41 if numLocks == 0 { 42 return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0") 43 } 44 45 locks := new(SHMLocks) 46 47 cPath := C.CString(path) 48 defer C.free(unsafe.Pointer(cPath)) 49 50 var errCode C.int 51 lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode) 52 if lockStruct == nil { 53 // We got a null pointer, so something errored 54 return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to create %d locks in %s", numLocks, path) 55 } 56 57 locks.lockStruct = lockStruct 58 locks.maxLocks = uint32(lockStruct.num_locks) 59 locks.valid = true 60 61 logrus.Debugf("Initialized SHM lock manager at path %s", path) 62 63 return locks, nil 64 } 65 66 // OpenSHMLock opens an existing shared-memory segment holding a given number of 67 // POSIX semaphores. numLocks must match the number of locks the shared memory 68 // segment was created with. 69 func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) { 70 if numLocks == 0 { 71 return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0") 72 } 73 74 locks := new(SHMLocks) 75 76 cPath := C.CString(path) 77 defer C.free(unsafe.Pointer(cPath)) 78 79 var errCode C.int 80 lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode) 81 if lockStruct == nil { 82 // We got a null pointer, so something errored 83 return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to open %d locks in %s", numLocks, path) 84 } 85 86 locks.lockStruct = lockStruct 87 locks.maxLocks = numLocks 88 locks.valid = true 89 90 return locks, nil 91 } 92 93 // GetMaxLocks returns the maximum number of locks in the SHM 94 func (locks *SHMLocks) GetMaxLocks() uint32 { 95 return locks.maxLocks 96 } 97 98 // Close closes an existing shared-memory segment. 99 // The segment will be rendered unusable after closing. 
100 // WARNING: If you Close() while there are still locks locked, these locks may 101 // fail to release, causing a program freeze. 102 // Close() is only intended to be used while testing the locks. 103 func (locks *SHMLocks) Close() error { 104 if !locks.valid { 105 return errors.Wrapf(syscall.EINVAL, "locks have already been closed") 106 } 107 108 locks.valid = false 109 110 retCode := C.close_lock_shm(locks.lockStruct) 111 if retCode < 0 { 112 // Negative errno returned 113 return syscall.Errno(-1 * retCode) 114 } 115 116 return nil 117 } 118 119 // AllocateSemaphore allocates a semaphore from a shared-memory segment for use 120 // by a container or pod. 121 // Returns the index of the semaphore that was allocated. 122 // Allocations past the maximum number of locks given when the SHM segment was 123 // created will result in an error, and no semaphore will be allocated. 124 func (locks *SHMLocks) AllocateSemaphore() (uint32, error) { 125 if !locks.valid { 126 return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed") 127 } 128 129 // This returns a U64, so we have the full u32 range available for 130 // semaphore indexes, and can still return error codes. 131 retCode := C.allocate_semaphore(locks.lockStruct) 132 if retCode < 0 { 133 var err = syscall.Errno(-1 * retCode) 134 // Negative errno returned 135 if errors.Is(err, syscall.ENOSPC) { 136 // ENOSPC expands to "no space left on device". While it is technically true 137 // that there's no room in the SHM inn for this lock, this tends to send normal people 138 // down the path of checking disk-space which is not actually their problem. 139 // Give a clue that it's actually due to num_locks filling up. 
140 var errFull = errors.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks) 141 return uint32(retCode), errFull 142 } 143 return uint32(retCode), syscall.Errno(-1 * retCode) 144 } 145 146 return uint32(retCode), nil 147 } 148 149 // AllocateGivenSemaphore allocates the given semaphore from the shared-memory 150 // segment for use by a container or pod. 151 // If the semaphore is already in use or the index is invalid an error will be 152 // returned. 153 func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error { 154 if !locks.valid { 155 return errors.Wrapf(syscall.EINVAL, "locks have already been closed") 156 } 157 158 retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem)) 159 if retCode < 0 { 160 return syscall.Errno(-1 * retCode) 161 } 162 163 return nil 164 } 165 166 // DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be 167 // reallocated to another container or pod. 168 // The given semaphore must be already allocated, or an error will be returned. 169 func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error { 170 if !locks.valid { 171 return errors.Wrapf(syscall.EINVAL, "locks have already been closed") 172 } 173 174 if sem > locks.maxLocks { 175 return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks) 176 } 177 178 retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem)) 179 if retCode < 0 { 180 // Negative errno returned 181 return syscall.Errno(-1 * retCode) 182 } 183 184 return nil 185 } 186 187 // DeallocateAllSemaphores frees all semaphores so they can be reallocated to 188 // other containers and pods. 
189 func (locks *SHMLocks) DeallocateAllSemaphores() error { 190 if !locks.valid { 191 return errors.Wrapf(syscall.EINVAL, "locks have already been closed") 192 } 193 194 retCode := C.deallocate_all_semaphores(locks.lockStruct) 195 if retCode < 0 { 196 // Negative errno return from C 197 return syscall.Errno(-1 * retCode) 198 } 199 200 return nil 201 } 202 203 // LockSemaphore locks the given semaphore. 204 // If the semaphore is already locked, LockSemaphore will block until the lock 205 // can be acquired. 206 // There is no requirement that the given semaphore be allocated. 207 // This ensures that attempts to lock a container after it has been deleted, 208 // but before the caller has queried the database to determine this, will 209 // succeed. 210 func (locks *SHMLocks) LockSemaphore(sem uint32) error { 211 if !locks.valid { 212 return errors.Wrapf(syscall.EINVAL, "locks have already been closed") 213 } 214 215 if sem > locks.maxLocks { 216 return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks) 217 } 218 219 // For pthread mutexes, we have to guarantee lock and unlock happen in 220 // the same thread. 221 runtime.LockOSThread() 222 223 retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem)) 224 if retCode < 0 { 225 // Negative errno returned 226 return syscall.Errno(-1 * retCode) 227 } 228 229 return nil 230 } 231 232 // UnlockSemaphore unlocks the given semaphore. 233 // Unlocking a semaphore that is already unlocked with return EBUSY. 234 // There is no requirement that the given semaphore be allocated. 235 // This ensures that attempts to lock a container after it has been deleted, 236 // but before the caller has queried the database to determine this, will 237 // succeed. 
238 func (locks *SHMLocks) UnlockSemaphore(sem uint32) error { 239 if !locks.valid { 240 return errors.Wrapf(syscall.EINVAL, "locks have already been closed") 241 } 242 243 if sem > locks.maxLocks { 244 return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks) 245 } 246 247 retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem)) 248 if retCode < 0 { 249 // Negative errno returned 250 return syscall.Errno(-1 * retCode) 251 } 252 253 // For pthread mutexes, we have to guarantee lock and unlock happen in 254 // the same thread. 255 // OK if we take multiple locks - UnlockOSThread() won't actually unlock 256 // until the number of calls equals the number of calls to 257 // LockOSThread() 258 runtime.UnlockOSThread() 259 260 return nil 261 }