github.com/containers/podman/v4@v4.9.4/libpod/lock/shm/shm_lock.go

//go:build (linux || freebsd) && cgo
// +build linux freebsd
// +build cgo

package shm

// #cgo LDFLAGS: -lrt -lpthread
// #cgo CFLAGS: -Wall -Werror
// #include <stdlib.h>
// #include <sys/types.h>
// #include <sys/mman.h>
// #include <fcntl.h>
// #include "shm_lock.h"
// const uint32_t bitmap_size_c = BITMAP_SIZE;
import "C"

import (
	"errors"
	"fmt"
	"runtime"
	"syscall"
	"unsafe"

	"github.com/sirupsen/logrus"
)

var (
	// BitmapSize is the size of the bitmap used when managing SHM locks.
	// An SHM lock manager's maximum lock count is rounded up to a multiple
	// of this number.
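	// For example, assuming BITMAP_SIZE is 32 (one 32-bit bitmap word per
	// block of locks), a request for 50 locks would be rounded up to 64.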
	BitmapSize = uint32(C.bitmap_size_c)
)

// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
// segment.
type SHMLocks struct { //nolint:revive // linter complains about stutter
	lockStruct *C.shm_struct_t
	maxLocks   uint32
	valid      bool
}

// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
// semaphores, and returns a struct that can be used to operate on those locks.
// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
// size used by the underlying implementation.
func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
	if numLocks == 0 {
		return nil, fmt.Errorf("number of locks must be greater than 0: %w", syscall.EINVAL)
	}

	locks := new(SHMLocks)

	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	var errCode C.int
	lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
	if lockStruct == nil {
		// We got a null pointer, so something errored
		return nil, fmt.Errorf("failed to create %d locks in %s: %w", numLocks, path, syscall.Errno(-1*errCode))
	}

	locks.lockStruct = lockStruct
	locks.maxLocks = uint32(lockStruct.num_locks)
	locks.valid = true

	logrus.Debugf("Initialized SHM lock manager at path %s", path)

	return locks, nil
}

// OpenSHMLock opens an existing shared-memory segment holding a given number of
// POSIX semaphores. numLocks must match the number of locks the shared memory
// segment was created with.
func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
	if numLocks == 0 {
		return nil, fmt.Errorf("number of locks must be greater than 0: %w", syscall.EINVAL)
	}

	locks := new(SHMLocks)

	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	var errCode C.int
	lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
	if lockStruct == nil {
		// We got a null pointer, so something errored
		return nil, fmt.Errorf("failed to open %d locks in %s: %w", numLocks, path, syscall.Errno(-1*errCode))
	}

	locks.lockStruct = lockStruct
	locks.maxLocks = numLocks
	locks.valid = true

	return locks, nil
}
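
// exampleCreateOrOpen is an illustrative sketch, not part of the upstream
// file: a typical create-or-open sequence. The SHM path and lock count here
// are hypothetical; Podman derives its own from configuration. This assumes
// setup_lock_shm creates the segment exclusively, so an existing segment
// surfaces as EEXIST.
func exampleCreateOrOpen() (*SHMLocks, error) {
	const path = "/libpod_example_lock" // hypothetical SHM object name
	const numLocks = 2048               // rounded up to a multiple of BitmapSize

	locks, err := CreateSHMLock(path, numLocks)
	if errors.Is(err, syscall.EEXIST) {
		// The segment already exists; open it with the same lock count
		// it was created with.
		return OpenSHMLock(path, numLocks)
	}
	return locks, err
}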

// GetMaxLocks returns the maximum number of locks in the SHM.
func (locks *SHMLocks) GetMaxLocks() uint32 {
	return locks.maxLocks
}

// Close closes an existing shared-memory segment.
// The segment will be rendered unusable after closing.
// WARNING: If you Close() while locks are still held, those locks may fail to
// release, causing a program freeze.
// Close() is only intended to be used while testing the locks.
func (locks *SHMLocks) Close() error {
	if !locks.valid {
		return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
	}

	locks.valid = false

	retCode := C.close_lock_shm(locks.lockStruct)
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
// by a container or pod.
// Returns the index of the semaphore that was allocated.
// Allocations past the maximum number of locks given when the SHM segment was
// created will result in an error, and no semaphore will be allocated.
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
	if !locks.valid {
		return 0, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
	}

	// This returns an int64, so we have the full u32 range available for
	// semaphore indexes and can still return negative error codes.
	retCode := C.allocate_semaphore(locks.lockStruct)
	if retCode < 0 {
		// Negative errno returned
		err := syscall.Errno(-1 * retCode)
		if errors.Is(err, syscall.ENOSPC) {
			// ENOSPC expands to "no space left on device". While it is
			// technically true that there's no room in the SHM for this
			// lock, that message tends to send people down the path of
			// checking disk space, which is not actually their problem.
			// Give a clue that it's actually num_locks filling up.
			return uint32(retCode), fmt.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks)
		}
		return uint32(retCode), err
	}

	return uint32(retCode), nil
}
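
// exampleAllocateBatch is an illustrative sketch, not part of the upstream
// file: allocating one lock per new container until the pool is exhausted.
// Note that the exhaustion error above deliberately replaces ENOSPC, so
// callers see the num_locks hint rather than a misleading "no space left on
// device". The helper name and slice-based interface are assumptions.
func exampleAllocateBatch(locks *SHMLocks, n int) ([]uint32, error) {
	sems := make([]uint32, 0, n)
	for i := 0; i < n; i++ {
		sem, err := locks.AllocateSemaphore()
		if err != nil {
			return sems, fmt.Errorf("allocated %d of %d locks: %w", len(sems), n, err)
		}
		sems = append(sems, sem)
	}
	return sems, nil
}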

// AllocateGivenSemaphore allocates the given semaphore from the shared-memory
// segment for use by a container or pod.
// If the semaphore is already in use or the index is invalid an error will be
// returned.
func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
	if !locks.valid {
		return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
	}

	retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
// reallocated to another container or pod.
// The given semaphore must be already allocated, or an error will be returned.
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
	if !locks.valid {
		return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
	}

	// Semaphore indexes are 0-based, so maxLocks itself is out of range.
	if sem >= locks.maxLocks {
		return fmt.Errorf("given semaphore %d is higher than maximum lock index %d: %w", sem, locks.maxLocks-1, syscall.EINVAL)
	}

	retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
// other containers and pods.
func (locks *SHMLocks) DeallocateAllSemaphores() error {
	if !locks.valid {
		return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
	}

	retCode := C.deallocate_all_semaphores(locks.lockStruct)
	if retCode < 0 {
		// Negative errno return from C
		return syscall.Errno(-1 * retCode)
	}

	return nil
}
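
// exampleRenumber is an illustrative sketch, not part of the upstream file:
// re-registering a known set of lock indices, as a lock-renumbering pass might
// do after deciding which index each container should own. The helper name and
// the idea of passing the indices in as a slice are assumptions.
func exampleRenumber(locks *SHMLocks, owned []uint32) error {
	// Start from a clean slate, then re-allocate exactly the given locks.
	if err := locks.DeallocateAllSemaphores(); err != nil {
		return err
	}
	for _, sem := range owned {
		if err := locks.AllocateGivenSemaphore(sem); err != nil {
			return err
		}
	}
	return nil
}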

// LockSemaphore locks the given semaphore.
// If the semaphore is already locked, LockSemaphore will block until the lock
// can be acquired.
// There is no requirement that the given semaphore be allocated.
// This ensures that attempts to lock a container after it has been deleted,
// but before the caller has queried the database to determine this, will
// succeed.
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
	if !locks.valid {
		return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
	}

	// Semaphore indexes are 0-based, so maxLocks itself is out of range.
	if sem >= locks.maxLocks {
		return fmt.Errorf("given semaphore %d is higher than maximum lock index %d: %w", sem, locks.maxLocks-1, syscall.EINVAL)
	}

	// For pthread mutexes, we have to guarantee lock and unlock happen in
	// the same thread.
	runtime.LockOSThread()

	retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

// UnlockSemaphore unlocks the given semaphore.
// Unlocking a semaphore that is already unlocked will return EBUSY.
// There is no requirement that the given semaphore be allocated.
// This ensures that attempts to unlock a container's lock after the container
// has been deleted, but before the caller has queried the database to
// determine this, will succeed.
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
	if !locks.valid {
		return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
	}

	// Semaphore indexes are 0-based, so maxLocks itself is out of range.
	if sem >= locks.maxLocks {
		return fmt.Errorf("given semaphore %d is higher than maximum lock index %d: %w", sem, locks.maxLocks-1, syscall.EINVAL)
	}

	retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	// For pthread mutexes, we have to guarantee lock and unlock happen in
	// the same thread.
	// It is OK if we take multiple locks - UnlockOSThread() won't actually
	// unlock the thread until the number of calls equals the number of
	// calls to LockOSThread().
	runtime.UnlockOSThread()

	return nil
}
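
// exampleLifecycle is an illustrative sketch, not part of the upstream file:
// the full lifecycle of a single lock. LockSemaphore pins the calling
// goroutine to its OS thread, so the matching UnlockSemaphore must run on the
// same goroutine.
func exampleLifecycle(locks *SHMLocks) error {
	sem, err := locks.AllocateSemaphore()
	if err != nil {
		return err
	}
	defer func() {
		if err := locks.DeallocateSemaphore(sem); err != nil {
			logrus.Errorf("Deallocating semaphore %d: %v", sem, err)
		}
	}()

	if err := locks.LockSemaphore(sem); err != nil {
		return err
	}
	// ... critical section protected by the semaphore ...
	return locks.UnlockSemaphore(sem)
}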

// GetFreeLocks returns the number of locks available to be allocated.
func (locks *SHMLocks) GetFreeLocks() (uint32, error) {
	if !locks.valid {
		return 0, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
	}

	retCode := C.available_locks(locks.lockStruct)
	if retCode < 0 {
		// Negative errno returned
		return 0, syscall.Errno(-1 * retCode)
	}

	return uint32(retCode), nil
}

// GetTakenLocks returns a list of the locks that are currently taken.
func (locks *SHMLocks) GetTakenLocks() ([]uint32, error) {
	if !locks.valid {
		return nil, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
	}

	var usedLocks []uint32

	// We don't need to lock the OS thread here: the lock (if taken) is
	// released immediately within try_lock, and Go will not reschedule the
	// goroutine to another thread while the C call is executing.
	for i := uint32(0); i < locks.maxLocks; i++ {
		retCode := C.try_lock(locks.lockStruct, C.uint32_t(i))
		if retCode < 0 {
			return nil, syscall.Errno(-1 * retCode)
		}
		if retCode == 0 {
			usedLocks = append(usedLocks, i)
		}
	}

	return usedLocks, nil
}
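
// exampleUsageReport is an illustrative sketch, not part of the upstream
// file: a small diagnostic that reports lock usage, in the spirit of a
// "podman system locks"-style check. The helper name is an assumption.
func exampleUsageReport(locks *SHMLocks) error {
	free, err := locks.GetFreeLocks()
	if err != nil {
		return err
	}
	taken, err := locks.GetTakenLocks()
	if err != nil {
		return err
	}
	logrus.Infof("%d of %d locks held (%d free to allocate): %v",
		len(taken), locks.GetMaxLocks(), free, taken)
	return nil
}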

// unlinkSHMLock removes the named SHM segment backing the locks.
func unlinkSHMLock(path string) error {
	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	if _, err := C.shm_unlink(cPath); err != nil {
		return fmt.Errorf("failed to unlink SHM locks: %w", err)
	}
	return nil
}