github.com/containers/libpod@v1.9.4-0.20220419124438-4284fd425507/libpod/lock/shm/shm_lock.go (about)

//go:build linux && cgo
// +build linux,cgo

package shm

// #cgo LDFLAGS: -lrt -lpthread
// #cgo CFLAGS: -Wall -Werror
// #include <stdlib.h>
// #include "shm_lock.h"
// const uint32_t bitmap_size_c = BITMAP_SIZE;
import "C"

import (
	"runtime"
	"syscall"
	"unsafe"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	// BitmapSize is the size of the bitmap used when managing SHM locks.
	// An SHM lock manager's max locks will be rounded up to a multiple of
	// this number.
	BitmapSize = uint32(C.bitmap_size_c)
)
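
// As a worked example of the rounding described above (assuming the C header
// defines BITMAP_SIZE as 32, which is not guaranteed here): requesting 100
// locks would be rounded up to the next multiple of 32, so the manager would
// actually hold 128 locks and GetMaxLocks would report 128.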

// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
// segment.
type SHMLocks struct { // nolint
	lockStruct *C.shm_struct_t
	maxLocks   uint32
	valid      bool
}

// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
// semaphores, and returns a struct that can be used to operate on those locks.
// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
// size used by the underlying implementation.
func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
	if numLocks == 0 {
		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
	}

	locks := new(SHMLocks)

	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	var errCode C.int
	lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
	if lockStruct == nil {
		// We got a null pointer, so something errored
		return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to create %d locks in %s", numLocks, path)
	}

	locks.lockStruct = lockStruct
	locks.maxLocks = uint32(lockStruct.num_locks)
	locks.valid = true

	logrus.Debugf("Initialized SHM lock manager at path %s", path)

	return locks, nil
}
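
// A minimal usage sketch for CreateSHMLock. The segment name "/example_locks"
// and the count 128 are illustrative values, not defaults of this package:
//
//	locks, err := CreateSHMLock("/example_locks", 128)
//	if err != nil {
//		return err
//	}
//	// Close() is intended for testing; long-running callers would normally
//	// keep the segment open for the life of the process.
//	defer locks.Close()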

// OpenSHMLock opens an existing shared-memory segment holding a given number of
// POSIX semaphores. numLocks must match the number of locks the shared memory
// segment was created with.
func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
	if numLocks == 0 {
		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
	}

	locks := new(SHMLocks)

	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	var errCode C.int
	lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
	if lockStruct == nil {
		// We got a null pointer, so something errored
		return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to open %d locks in %s", numLocks, path)
	}

	locks.lockStruct = lockStruct
	locks.maxLocks = numLocks
	locks.valid = true

	return locks, nil
}
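
// A corresponding sketch for reattaching to an existing segment. Per the doc
// comment above, numLocks must match the count the segment was created with
// (after any bitmap-size rounding):
//
//	locks, err := OpenSHMLock("/example_locks", 128)
//	if err != nil {
//		return err
//	}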

// GetMaxLocks returns the maximum number of locks in the SHM segment.
func (locks *SHMLocks) GetMaxLocks() uint32 {
	return locks.maxLocks
}

// Close closes an existing shared-memory segment.
// The segment will be rendered unusable after closing.
// WARNING: If you Close() while locks are still held, those locks may fail to
// release, causing a program freeze.
// Close() is only intended to be used while testing the locks.
func (locks *SHMLocks) Close() error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	locks.valid = false

	retCode := C.close_lock_shm(locks.lockStruct)
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
// by a container or pod.
// Returns the index of the semaphore that was allocated.
// Allocations past the maximum number of locks given when the SHM segment was
// created will result in an error, and no semaphore will be allocated.
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
	if !locks.valid {
		return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	// This returns a signed 64-bit value, so we have the full uint32 range
	// available for semaphore indexes and can still return negative error
	// codes.
	retCode := C.allocate_semaphore(locks.lockStruct)
	if retCode < 0 {
		// Negative errno returned
		return 0, syscall.Errno(-1 * retCode)
	}

	return uint32(retCode), nil
}
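
// An illustrative allocate/free round trip, assuming "locks" is an *SHMLocks
// obtained from CreateSHMLock or OpenSHMLock:
//
//	sem, err := locks.AllocateSemaphore()
//	if err != nil {
//		return err
//	}
//	// ... record sem as the lock index for a container or pod ...
//	if err := locks.DeallocateSemaphore(sem); err != nil {
//		return err
//	}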

// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
// segment for use by a container or pod.
// If the semaphore is already in use or the index is invalid an error will be
// returned.
func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		return syscall.Errno(-1 * retCode)
	}

	return nil
}
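
// A sketch of claiming a specific index, as might be done when restoring
// previously recorded lock assignments. The index 42 is purely illustrative:
//
//	if err := locks.AllocateGivenSemaphore(42); err != nil {
//		// Fails if the index is already allocated or out of range.
//		return err
//	}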

// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
// reallocated to another container or pod.
// The given semaphore must already be allocated, or an error will be returned.
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	if sem >= locks.maxLocks {
		return errors.Wrapf(syscall.EINVAL, "given semaphore %d must be less than maximum locks count %d", sem, locks.maxLocks)
	}

	retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
// other containers and pods.
func (locks *SHMLocks) DeallocateAllSemaphores() error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	retCode := C.deallocate_all_semaphores(locks.lockStruct)
	if retCode < 0 {
		// Negative errno return from C
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.
// There is no requirement that the given semaphore be allocated.
// This ensures that attempts to lock a container after it has been deleted,
// but before the caller has queried the database to determine this, will
// succeed.
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	if sem >= locks.maxLocks {
		return errors.Wrapf(syscall.EINVAL, "given semaphore %d must be less than maximum locks count %d", sem, locks.maxLocks)
	}

	// For pthread mutexes, we have to guarantee lock and unlock happen in
	// the same thread.
	runtime.LockOSThread()

	retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// The lock was not acquired, so release the OS thread pin taken
		// above; it is only needed while the lock is held.
		runtime.UnlockOSThread()
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// UnlockSemaphore unlocks the given semaphore.
// Unlocking a semaphore that is already unlocked will return EBUSY.
// There is no requirement that the given semaphore be allocated.
// This ensures that attempts to unlock a container after it has been deleted,
// but before the caller has queried the database to determine this, will
// succeed.
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	if sem >= locks.maxLocks {
		return errors.Wrapf(syscall.EINVAL, "given semaphore %d must be less than maximum locks count %d", sem, locks.maxLocks)
	}

	retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	// For pthread mutexes, we have to guarantee lock and unlock happen in
	// the same thread.
	// OK if we take multiple locks - UnlockOSThread() won't actually unlock
	// until the number of calls equals the number of calls to
	// LockOSThread().
	runtime.UnlockOSThread()

	return nil
}
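
// A sketch of the intended lock/unlock pairing. Because LockSemaphore pins
// the calling goroutine to its OS thread until UnlockSemaphore releases it,
// both calls must be made from the same goroutine; "sem" is assumed to have
// been allocated earlier:
//
//	if err := locks.LockSemaphore(sem); err != nil {
//		return err
//	}
//	// ... critical section protected by the SHM lock ...
//	if err := locks.UnlockSemaphore(sem); err != nil {
//		return err
//	}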