github.com/containers/podman/v4@v4.9.4/libpod/lock/shm/shm_lock_test.go (about)

     1  //go:build linux || freebsd
     2  // +build linux freebsd
     3  
     4  package shm
     5  
     6  import (
     7  	"errors"
     8  	"fmt"
     9  	"io/fs"
    10  	"os"
    11  	"runtime"
    12  	"testing"
    13  	"time"
    14  
    15  	"github.com/stretchr/testify/assert"
    16  	"github.com/stretchr/testify/require"
    17  )
    18  
    19  // All tests here are in the same process, which somewhat limits their utility
     20  // The big intent of this package is multiprocess locking, which is really hard
    21  // to test without actually having multiple processes...
    22  // We can at least verify that the locks work within the local process.
    23  
var (
	// numLocks is 4 * BitmapSize so allocation tests are forced to
	// traverse multiple bitmaps rather than staying inside the first one.
	numLocks = 4 * BitmapSize
)

// lockPath is the SHM segment name shared by every test in this file.
// NOTE(review): on Linux this presumably backs onto /dev/shm/libpod_test —
// confirm against the shm_open-based implementation.
const lockPath = "/libpod_test"
    30  
    31  // We need a test main to ensure that the SHM is created before the tests run
    32  func TestMain(m *testing.M) {
    33  	// Remove prior /libpod_test
    34  	if err := unlinkSHMLock(lockPath); err != nil && !errors.Is(err, fs.ErrNotExist) {
    35  		fmt.Fprintf(os.Stderr, "Error cleaning SHM for tests: %v\n", err)
    36  		os.Exit(-1)
    37  	}
    38  	shmLock, err := CreateSHMLock(lockPath, numLocks)
    39  	if err != nil {
    40  		fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err)
    41  		os.Exit(-1)
    42  	}
    43  
    44  	// Close the SHM - every subsequent test will reopen
    45  	if err := shmLock.Close(); err != nil {
    46  		fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err)
    47  		os.Exit(-1)
    48  	}
    49  
    50  	exitCode := m.Run()
    51  
    52  	// We need to remove the SHM segment to clean up after ourselves
    53  	os.RemoveAll("/dev/shm/libpod_lock")
    54  
    55  	os.Exit(exitCode)
    56  }
    57  
    58  func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
    59  	locks, err := OpenSHMLock(lockPath, numLocks)
    60  	if err != nil {
    61  		t.Fatalf("Error opening locks: %v", err)
    62  	}
    63  	defer func() {
    64  		// Deallocate all locks
    65  		if err := locks.DeallocateAllSemaphores(); err != nil {
    66  			t.Fatalf("Error deallocating semaphores: %v", err)
    67  		}
    68  
    69  		if err := locks.Close(); err != nil {
    70  			t.Fatalf("Error closing locks: %v", err)
    71  		}
    72  	}()
    73  
    74  	success := t.Run("locks", func(t *testing.T) {
    75  		testFunc(t, locks)
    76  	})
    77  	if !success {
    78  		t.Fail()
    79  	}
    80  }
    81  
    82  // Test that creating an SHM with a bad size rounds up to a good size
    83  func TestCreateNewSHMBadSizeRoundsUp(t *testing.T) {
    84  	// Remove prior /test1
    85  	if err := unlinkSHMLock("/test1"); err != nil && !errors.Is(err, fs.ErrNotExist) {
    86  		t.Fatalf("Error cleaning SHM for tests: %v\n", err)
    87  	}
    88  	// Odd number, not a power of 2, should never be a word size on a system
    89  	lock, err := CreateSHMLock("/test1", 7)
    90  	assert.NoError(t, err)
    91  	assert.NotNil(t, lock)
    92  
    93  	assert.Equal(t, lock.GetMaxLocks(), BitmapSize)
    94  
    95  	if err := lock.Close(); err != nil {
    96  		t.Fatalf("Error closing locks: %v", err)
    97  	}
    98  }
    99  
   100  // Test that creating an SHM with 0 size fails
   101  func TestCreateNewSHMZeroSize(t *testing.T) {
   102  	_, err := CreateSHMLock("/test2", 0)
   103  	assert.Error(t, err)
   104  }
   105  
   106  // Test that deallocating an unallocated lock errors
   107  func TestDeallocateUnallocatedLockErrors(t *testing.T) {
   108  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   109  		err := locks.DeallocateSemaphore(0)
   110  		assert.Error(t, err)
   111  	})
   112  }
   113  
   114  // Test that unlocking an unlocked lock fails
   115  func TestUnlockingUnlockedLockFails(t *testing.T) {
   116  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   117  		err := locks.UnlockSemaphore(0)
   118  		assert.Error(t, err)
   119  	})
   120  }
   121  
   122  // Test that locking and double-unlocking fails
   123  func TestDoubleUnlockFails(t *testing.T) {
   124  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   125  		err := locks.LockSemaphore(0)
   126  		assert.NoError(t, err)
   127  
   128  		err = locks.UnlockSemaphore(0)
   129  		assert.NoError(t, err)
   130  
   131  		err = locks.UnlockSemaphore(0)
   132  		assert.Error(t, err)
   133  	})
   134  }
   135  
   136  // Test allocating - lock - unlock - deallocate cycle, single lock
   137  func TestLockLifecycleSingleLock(t *testing.T) {
   138  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   139  		sem, err := locks.AllocateSemaphore()
   140  		require.NoError(t, err)
   141  
   142  		err = locks.LockSemaphore(sem)
   143  		assert.NoError(t, err)
   144  
   145  		err = locks.UnlockSemaphore(sem)
   146  		assert.NoError(t, err)
   147  
   148  		err = locks.DeallocateSemaphore(sem)
   149  		assert.NoError(t, err)
   150  	})
   151  }
   152  
   153  // Test allocate two locks returns different locks
   154  func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
   155  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   156  		sem1, err := locks.AllocateSemaphore()
   157  		assert.NoError(t, err)
   158  
   159  		sem2, err := locks.AllocateSemaphore()
   160  		assert.NoError(t, err)
   161  
   162  		assert.NotEqual(t, sem1, sem2)
   163  	})
   164  }
   165  
   166  // Test allocate all locks successful and all are unique
   167  func TestAllocateAllLocksSucceeds(t *testing.T) {
   168  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   169  		sems := make(map[uint32]bool)
   170  		var i uint32
   171  		for i = 0; i < numLocks; i++ {
   172  			sem, err := locks.AllocateSemaphore()
   173  			assert.NoError(t, err)
   174  
   175  			// Ensure the allocate semaphore is unique
   176  			_, ok := sems[sem]
   177  			assert.False(t, ok)
   178  
   179  			sems[sem] = true
   180  		}
   181  	})
   182  }
   183  
   184  // Test allocating more than the given max fails
   185  func TestAllocateTooManyLocksFails(t *testing.T) {
   186  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   187  		// Allocate all locks
   188  		var i uint32
   189  		for i = 0; i < numLocks; i++ {
   190  			_, err := locks.AllocateSemaphore()
   191  			assert.NoError(t, err)
   192  		}
   193  
   194  		// Try and allocate one more
   195  		_, err := locks.AllocateSemaphore()
   196  		assert.Error(t, err)
   197  	})
   198  }
   199  
   200  // Test allocating max locks, deallocating one, and then allocating again succeeds
   201  func TestAllocateDeallocateCycle(t *testing.T) {
   202  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   203  		// Allocate all locks
   204  		var i uint32
   205  		for i = 0; i < numLocks; i++ {
   206  			_, err := locks.AllocateSemaphore()
   207  			assert.NoError(t, err)
   208  		}
   209  
   210  		// Now loop through again, deallocating and reallocating.
   211  		// Each time we free 1 semaphore, allocate again, and make sure
   212  		// we get the same semaphore back.
   213  		var j uint32
   214  		for j = 0; j < numLocks; j++ {
   215  			err := locks.DeallocateSemaphore(j)
   216  			assert.NoError(t, err)
   217  
   218  			newSem, err := locks.AllocateSemaphore()
   219  			assert.NoError(t, err)
   220  			assert.Equal(t, j, newSem)
   221  		}
   222  	})
   223  }
   224  
   225  // Test that DeallocateAllSemaphores deallocates all semaphores
   226  func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
   227  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   228  		// Allocate a lock
   229  		locks1, err := locks.AllocateSemaphore()
   230  		assert.NoError(t, err)
   231  
   232  		// Free all locks
   233  		err = locks.DeallocateAllSemaphores()
   234  		assert.NoError(t, err)
   235  
   236  		// Allocate another lock
   237  		locks2, err := locks.AllocateSemaphore()
   238  		assert.NoError(t, err)
   239  
   240  		assert.Equal(t, locks1, locks2)
   241  	})
   242  }
   243  
   244  // Test that locks actually lock
   245  func TestLockSemaphoreActuallyLocks(t *testing.T) {
   246  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   247  		// This entire test is very ugly - lots of sleeps to try and get
   248  		// things to occur in the right order.
   249  		// It also doesn't even exercise the multiprocess nature of the
   250  		// locks.
   251  
   252  		// Get the current time
   253  		startTime := time.Now()
   254  
   255  		// Start a goroutine to take the lock and then release it after
   256  		// a second.
   257  		go func() {
   258  			err := locks.LockSemaphore(0)
   259  			assert.NoError(t, err)
   260  
   261  			time.Sleep(1 * time.Second)
   262  
   263  			err = locks.UnlockSemaphore(0)
   264  			assert.NoError(t, err)
   265  		}()
   266  
   267  		// Sleep for a quarter of a second to give the goroutine time
   268  		// to kick off and grab the lock
   269  		time.Sleep(250 * time.Millisecond)
   270  
   271  		// Take the lock
   272  		err := locks.LockSemaphore(0)
   273  		assert.NoError(t, err)
   274  
   275  		// Get the current time
   276  		endTime := time.Now()
   277  
   278  		// Verify that at least 1 second has passed since start
   279  		duration := endTime.Sub(startTime)
   280  		assert.True(t, duration.Seconds() > 1.0)
   281  	})
   282  }
   283  
   284  // Test that locking and unlocking two semaphores succeeds
   285  // Ensures that runtime.LockOSThread() is doing its job
   286  func TestLockAndUnlockTwoSemaphore(t *testing.T) {
   287  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   288  		err := locks.LockSemaphore(5)
   289  		assert.NoError(t, err)
   290  
   291  		err = locks.LockSemaphore(6)
   292  		assert.NoError(t, err)
   293  
   294  		err = locks.UnlockSemaphore(6)
   295  		assert.NoError(t, err)
   296  
   297  		// Now yield scheduling
   298  		// To try and get us on another OS thread
   299  		runtime.Gosched()
   300  
   301  		// And unlock the last semaphore
   302  		// If we are in a different OS thread, this should fail.
   303  		// However, runtime.UnlockOSThread() should guarantee we are not
   304  		err = locks.UnlockSemaphore(5)
   305  		assert.NoError(t, err)
   306  	})
   307  }