github.com/hanks177/podman/v4@v4.1.3-0.20220613032544-16d90015bc83/libpod/lock/shm/shm_lock.go

//go:build linux && cgo
// +build linux,cgo

package shm

// #cgo LDFLAGS: -lrt -lpthread
// #cgo CFLAGS: -Wall -Werror
// #include <stdlib.h>
// #include "shm_lock.h"
// const uint32_t bitmap_size_c = BITMAP_SIZE;
import "C"

import (
	"runtime"
	"syscall"
	"unsafe"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var (
	// BitmapSize is the size of the bitmap used when managing SHM locks.
	// An SHM lock manager's max locks will be rounded up to a multiple of
	// this number.
	BitmapSize = uint32(C.bitmap_size_c)
)
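
// As the comments here and on CreateSHMLock note, a manager's lock count is
// rounded up to a multiple of BitmapSize. A minimal sketch of that rounding,
// purely for illustration (the helper name is hypothetical and not part of
// this package):
//
//	func roundUpToBitmap(numLocks uint32) uint32 {
//		if numLocks%BitmapSize == 0 {
//			return numLocks
//		}
//		return (numLocks/BitmapSize + 1) * BitmapSize
//	}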

// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
// segment.
type SHMLocks struct { // nolint
	lockStruct *C.shm_struct_t
	maxLocks   uint32
	valid      bool
}

// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
// semaphores, and returns a struct that can be used to operate on those locks.
// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
// size used by the underlying implementation.
func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
	if numLocks == 0 {
		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
	}

	locks := new(SHMLocks)

	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	var errCode C.int
	lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
	if lockStruct == nil {
		// We got a null pointer, so something errored
		return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to create %d locks in %s", numLocks, path)
	}

	locks.lockStruct = lockStruct
	locks.maxLocks = uint32(lockStruct.num_locks)
	locks.valid = true

	logrus.Debugf("Initialized SHM lock manager at path %s", path)

	return locks, nil
}
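
// A minimal usage sketch for creating a fresh lock manager. The SHM path and
// lock count below are hypothetical examples, not defaults of this package:
//
//	locks, err := CreateSHMLock("/libpod_lock", 2048)
//	if err != nil {
//		return err
//	}
//	// locks.GetMaxLocks() may report more than 2048 after rounding to BitmapSize.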

// OpenSHMLock opens an existing shared-memory segment holding a given number of
// POSIX semaphores. numLocks must match the number of locks the shared memory
// segment was created with.
func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
	if numLocks == 0 {
		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
	}

	locks := new(SHMLocks)

	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	var errCode C.int
	lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
	if lockStruct == nil {
		// We got a null pointer, so something errored
		return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to open %d locks in %s", numLocks, path)
	}

	locks.lockStruct = lockStruct
	locks.maxLocks = numLocks
	locks.valid = true

	return locks, nil
}
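
// A minimal sketch of reattaching to an existing segment, assuming it was
// created as in the CreateSHMLock example above (path and count are
// hypothetical):
//
//	locks, err := OpenSHMLock("/libpod_lock", 2048)
//	if err != nil {
//		return err
//	}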

// GetMaxLocks returns the maximum number of locks in the SHM
func (locks *SHMLocks) GetMaxLocks() uint32 {
	return locks.maxLocks
}

// Close closes an existing shared-memory segment.
// The segment will be rendered unusable after closing.
// WARNING: If you Close() while there are still locks locked, these locks may
// fail to release, causing a program freeze.
// Close() is only intended to be used while testing the locks.
func (locks *SHMLocks) Close() error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	locks.valid = false

	retCode := C.close_lock_shm(locks.lockStruct)
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
// by a container or pod.
// Returns the index of the semaphore that was allocated.
// Allocations past the maximum number of locks given when the SHM segment was
// created will result in an error, and no semaphore will be allocated.
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
	if !locks.valid {
		return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	// This returns a 64-bit value, so we have the full uint32 range available
	// for semaphore indexes and can still return negative error codes.
	retCode := C.allocate_semaphore(locks.lockStruct)
	if retCode < 0 {
		// Negative errno returned
		err := syscall.Errno(-1 * retCode)
		if errors.Is(err, syscall.ENOSPC) {
			// ENOSPC expands to "no space left on device". While it is
			// technically true that there is no room in the SHM inn for this
			// lock, that message tends to send people down the path of checking
			// disk space, which is not actually their problem.
			// Give a clue that the real cause is num_locks filling up.
			return uint32(retCode), errors.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks)
		}
		return uint32(retCode), err
	}

	return uint32(retCode), nil
}
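
// A minimal allocation sketch, assuming locks is an open *SHMLocks as in the
// examples above. The returned index is what a caller would persist and later
// pass to LockSemaphore/UnlockSemaphore:
//
//	sem, err := locks.AllocateSemaphore()
//	if err != nil {
//		return err
//	}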

// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
// segment for use by a container or pod.
// If the semaphore is already in use or the index is invalid, an error will be
// returned.
func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		return syscall.Errno(-1 * retCode)
	}

	return nil
}
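
// A minimal sketch of re-registering a previously allocated index (for
// example, one read back from persistent state after a restart); locks and
// sem are assumed to come from the examples above:
//
//	if err := locks.AllocateGivenSemaphore(sem); err != nil {
//		return err
//	}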

// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
// reallocated to another container or pod.
// The given semaphore must be already allocated, or an error will be returned.
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	if sem > locks.maxLocks {
		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
	}

	retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
// other containers and pods.
func (locks *SHMLocks) DeallocateAllSemaphores() error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	retCode := C.deallocate_all_semaphores(locks.lockStruct)
	if retCode < 0 {
		// Negative errno return from C
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.
// There is no requirement that the given semaphore be allocated.
// This ensures that attempts to lock a container after it has been deleted,
// but before the caller has queried the database to determine this, will
// succeed.
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	if sem > locks.maxLocks {
		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
	}

	// For pthread mutexes, we have to guarantee lock and unlock happen in
	// the same thread.
	runtime.LockOSThread()

	retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// UnlockSemaphore unlocks the given semaphore.
// Unlocking a semaphore that is already unlocked will return EBUSY.
// There is no requirement that the given semaphore be allocated.
// This ensures that attempts to unlock a container after it has been deleted,
// but before the caller has queried the database to determine this, will
// succeed.
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	if sem > locks.maxLocks {
		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
	}

	retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	// For pthread mutexes, we have to guarantee lock and unlock happen in
	// the same thread.
	// It is fine if we hold multiple locks - UnlockOSThread() won't actually
	// unlock the thread until the number of calls equals the number of calls
	// to LockOSThread().
	runtime.UnlockOSThread()

	return nil
}
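
// A minimal lock/unlock sketch. Because the underlying pthread mutex must be
// locked and unlocked on the same OS thread, both calls should happen in the
// same goroutine; locks and sem are assumed to come from the examples above:
//
//	if err := locks.LockSemaphore(sem); err != nil {
//		return err
//	}
//	// ... critical section ...
//	if err := locks.UnlockSemaphore(sem); err != nil {
//		return err
//	}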