github.com/containers/libpod@v1.9.4-0.20220419124438-4284fd425507/libpod/lock/shm/shm_lock_test.go (about)

     1  // +build linux
     2  
     3  package shm
     4  
     5  import (
     6  	"fmt"
     7  	"os"
     8  	"runtime"
     9  	"testing"
    10  	"time"
    11  
    12  	"github.com/stretchr/testify/assert"
    13  	"github.com/stretchr/testify/require"
    14  )
    15  
    16  // All tests here are in the same process, which somewhat limits their utility
    17  // The big intent of this package it multiprocess locking, which is really hard
    18  // to test without actually having multiple processes...
    19  // We can at least verify that the locks work within the local process.
    20  
var (
	// numLocks is the semaphore count used by every test.
	// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps
	numLocks = 4 * BitmapSize
)

// lockPath is the SHM segment name passed to CreateSHMLock/OpenSHMLock.
// NOTE(review): on Linux this presumably appears as /dev/shm/libpod_test — verify
// against the shm_open-based implementation.
const lockPath = "/libpod_test"
    27  
    28  // We need a test main to ensure that the SHM is created before the tests run
    29  func TestMain(m *testing.M) {
    30  	shmLock, err := CreateSHMLock(lockPath, numLocks)
    31  	if err != nil {
    32  		fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err)
    33  		os.Exit(-1)
    34  	}
    35  
    36  	// Close the SHM - every subsequent test will reopen
    37  	if err := shmLock.Close(); err != nil {
    38  		fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err)
    39  		os.Exit(-1)
    40  	}
    41  
    42  	exitCode := m.Run()
    43  
    44  	// We need to remove the SHM segment to clean up after ourselves
    45  	os.RemoveAll("/dev/shm/libpod_lock")
    46  
    47  	os.Exit(exitCode)
    48  }
    49  
    50  func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
    51  	locks, err := OpenSHMLock(lockPath, numLocks)
    52  	if err != nil {
    53  		t.Fatalf("Error opening locks: %v", err)
    54  	}
    55  	defer func() {
    56  		// Deallocate all locks
    57  		if err := locks.DeallocateAllSemaphores(); err != nil {
    58  			t.Fatalf("Error deallocating semaphores: %v", err)
    59  		}
    60  
    61  		if err := locks.Close(); err != nil {
    62  			t.Fatalf("Error closing locks: %v", err)
    63  		}
    64  	}()
    65  
    66  	success := t.Run("locks", func(t *testing.T) {
    67  		testFunc(t, locks)
    68  	})
    69  	if !success {
    70  		t.Fail()
    71  	}
    72  }
    73  
    74  // Test that creating an SHM with a bad size rounds up to a good size
    75  func TestCreateNewSHMBadSizeRoundsUp(t *testing.T) {
    76  	// Odd number, not a power of 2, should never be a word size on a system
    77  	lock, err := CreateSHMLock("/test1", 7)
    78  	assert.NoError(t, err)
    79  
    80  	assert.Equal(t, lock.GetMaxLocks(), BitmapSize)
    81  
    82  	if err := lock.Close(); err != nil {
    83  		t.Fatalf("Error closing locks: %v", err)
    84  	}
    85  }
    86  
    87  // Test that creating an SHM with 0 size fails
    88  func TestCreateNewSHMZeroSize(t *testing.T) {
    89  	_, err := CreateSHMLock("/test2", 0)
    90  	assert.Error(t, err)
    91  }
    92  
    93  // Test that deallocating an unallocated lock errors
    94  func TestDeallocateUnallocatedLockErrors(t *testing.T) {
    95  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
    96  		err := locks.DeallocateSemaphore(0)
    97  		assert.Error(t, err)
    98  	})
    99  }
   100  
   101  // Test that unlocking an unlocked lock fails
   102  func TestUnlockingUnlockedLockFails(t *testing.T) {
   103  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   104  		err := locks.UnlockSemaphore(0)
   105  		assert.Error(t, err)
   106  	})
   107  }
   108  
   109  // Test that locking and double-unlocking fails
   110  func TestDoubleUnlockFails(t *testing.T) {
   111  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   112  		err := locks.LockSemaphore(0)
   113  		assert.NoError(t, err)
   114  
   115  		err = locks.UnlockSemaphore(0)
   116  		assert.NoError(t, err)
   117  
   118  		err = locks.UnlockSemaphore(0)
   119  		assert.Error(t, err)
   120  	})
   121  }
   122  
   123  // Test allocating - lock - unlock - deallocate cycle, single lock
   124  func TestLockLifecycleSingleLock(t *testing.T) {
   125  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   126  		sem, err := locks.AllocateSemaphore()
   127  		require.NoError(t, err)
   128  
   129  		err = locks.LockSemaphore(sem)
   130  		assert.NoError(t, err)
   131  
   132  		err = locks.UnlockSemaphore(sem)
   133  		assert.NoError(t, err)
   134  
   135  		err = locks.DeallocateSemaphore(sem)
   136  		assert.NoError(t, err)
   137  	})
   138  }
   139  
   140  // Test allocate two locks returns different locks
   141  func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
   142  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   143  		sem1, err := locks.AllocateSemaphore()
   144  		assert.NoError(t, err)
   145  
   146  		sem2, err := locks.AllocateSemaphore()
   147  		assert.NoError(t, err)
   148  
   149  		assert.NotEqual(t, sem1, sem2)
   150  	})
   151  }
   152  
   153  // Test allocate all locks successful and all are unique
   154  func TestAllocateAllLocksSucceeds(t *testing.T) {
   155  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   156  		sems := make(map[uint32]bool)
   157  		var i uint32
   158  		for i = 0; i < numLocks; i++ {
   159  			sem, err := locks.AllocateSemaphore()
   160  			assert.NoError(t, err)
   161  
   162  			// Ensure the allocate semaphore is unique
   163  			_, ok := sems[sem]
   164  			assert.False(t, ok)
   165  
   166  			sems[sem] = true
   167  		}
   168  	})
   169  }
   170  
   171  // Test allocating more than the given max fails
   172  func TestAllocateTooManyLocksFails(t *testing.T) {
   173  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   174  		// Allocate all locks
   175  		var i uint32
   176  		for i = 0; i < numLocks; i++ {
   177  			_, err := locks.AllocateSemaphore()
   178  			assert.NoError(t, err)
   179  		}
   180  
   181  		// Try and allocate one more
   182  		_, err := locks.AllocateSemaphore()
   183  		assert.Error(t, err)
   184  	})
   185  }
   186  
   187  // Test allocating max locks, deallocating one, and then allocating again succeeds
   188  func TestAllocateDeallocateCycle(t *testing.T) {
   189  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   190  		// Allocate all locks
   191  		var i uint32
   192  		for i = 0; i < numLocks; i++ {
   193  			_, err := locks.AllocateSemaphore()
   194  			assert.NoError(t, err)
   195  		}
   196  
   197  		// Now loop through again, deallocating and reallocating.
   198  		// Each time we free 1 semaphore, allocate again, and make sure
   199  		// we get the same semaphore back.
   200  		var j uint32
   201  		for j = 0; j < numLocks; j++ {
   202  			err := locks.DeallocateSemaphore(j)
   203  			assert.NoError(t, err)
   204  
   205  			newSem, err := locks.AllocateSemaphore()
   206  			assert.NoError(t, err)
   207  			assert.Equal(t, j, newSem)
   208  		}
   209  	})
   210  }
   211  
   212  // Test that DeallocateAllSemaphores deallocates all semaphores
   213  func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
   214  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   215  		// Allocate a lock
   216  		locks1, err := locks.AllocateSemaphore()
   217  		assert.NoError(t, err)
   218  
   219  		// Free all locks
   220  		err = locks.DeallocateAllSemaphores()
   221  		assert.NoError(t, err)
   222  
   223  		// Allocate another lock
   224  		locks2, err := locks.AllocateSemaphore()
   225  		assert.NoError(t, err)
   226  
   227  		assert.Equal(t, locks1, locks2)
   228  	})
   229  }
   230  
   231  // Test that locks actually lock
   232  func TestLockSemaphoreActuallyLocks(t *testing.T) {
   233  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   234  		// This entire test is very ugly - lots of sleeps to try and get
   235  		// things to occur in the right order.
   236  		// It also doesn't even exercise the multiprocess nature of the
   237  		// locks.
   238  
   239  		// Get the current time
   240  		startTime := time.Now()
   241  
   242  		// Start a goroutine to take the lock and then release it after
   243  		// a second.
   244  		go func() {
   245  			err := locks.LockSemaphore(0)
   246  			assert.NoError(t, err)
   247  
   248  			time.Sleep(1 * time.Second)
   249  
   250  			err = locks.UnlockSemaphore(0)
   251  			assert.NoError(t, err)
   252  		}()
   253  
   254  		// Sleep for a quarter of a second to give the goroutine time
   255  		// to kick off and grab the lock
   256  		time.Sleep(250 * time.Millisecond)
   257  
   258  		// Take the lock
   259  		err := locks.LockSemaphore(0)
   260  		assert.NoError(t, err)
   261  
   262  		// Get the current time
   263  		endTime := time.Now()
   264  
   265  		// Verify that at least 1 second has passed since start
   266  		duration := endTime.Sub(startTime)
   267  		assert.True(t, duration.Seconds() > 1.0)
   268  	})
   269  }
   270  
   271  // Test that locking and unlocking two semaphores succeeds
   272  // Ensures that runtime.LockOSThread() is doing its job
   273  func TestLockAndUnlockTwoSemaphore(t *testing.T) {
   274  	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
   275  		err := locks.LockSemaphore(5)
   276  		assert.NoError(t, err)
   277  
   278  		err = locks.LockSemaphore(6)
   279  		assert.NoError(t, err)
   280  
   281  		err = locks.UnlockSemaphore(6)
   282  		assert.NoError(t, err)
   283  
   284  		// Now yield scheduling
   285  		// To try and get us on another OS thread
   286  		runtime.Gosched()
   287  
   288  		// And unlock the last semaphore
   289  		// If we are in a different OS thread, this should fail.
   290  		// However, runtime.UnlockOSThread() should guarantee we are not
   291  		err = locks.UnlockSemaphore(5)
   292  		assert.NoError(t, err)
   293  	})
   294  }