github.com/AbhinandanKurakure/podman/v3@v3.4.10/libpod/lock/shm/shm_lock_test.go

//go:build linux
// +build linux

package shm

import (
	"fmt"
	"os"
	"runtime"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// All tests here are in the same process, which somewhat limits their utility.
// The big intent of this package is multiprocess locking, which is really hard
// to test without actually having multiple processes...
// We can at least verify that the locks work within the local process.

var (
	// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps
	numLocks = 4 * BitmapSize
)

const lockPath = "/libpod_test"

// We need a test main to ensure that the SHM is created before the tests run.
func TestMain(m *testing.M) {
	// Remove any prior /dev/shm/libpod_test segment
	os.RemoveAll("/dev/shm" + lockPath)

	shmLock, err := CreateSHMLock(lockPath, numLocks)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err)
		os.Exit(-1)
	}

	// Close the SHM - every subsequent test will reopen it
	if err := shmLock.Close(); err != nil {
		fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err)
		os.Exit(-1)
	}

	exitCode := m.Run()

	// We need to remove the SHM segment to clean up after ourselves.
	// (This must match the path created above; removing a different
	// name would leave the test segment behind.)
	os.RemoveAll("/dev/shm" + lockPath)

	os.Exit(exitCode)
}

func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
	locks, err := OpenSHMLock(lockPath, numLocks)
	if err != nil {
		t.Fatalf("Error opening locks: %v", err)
	}
	defer func() {
		// Deallocate all locks
		if err := locks.DeallocateAllSemaphores(); err != nil {
			t.Fatalf("Error deallocating semaphores: %v", err)
		}

		if err := locks.Close(); err != nil {
			t.Fatalf("Error closing locks: %v", err)
		}
	}()

	success := t.Run("locks", func(t *testing.T) {
		testFunc(t, locks)
	})
	if !success {
		t.Fail()
	}
}

// Test that creating an SHM with a bad size rounds up to a good size
func TestCreateNewSHMBadSizeRoundsUp(t *testing.T) {
	// Remove any prior /dev/shm/test1 segment
	os.RemoveAll("/dev/shm/test1")
	// Odd number, not a power of 2, should never be a word size on a system
	lock, err := CreateSHMLock("/test1", 7)
	assert.NoError(t, err)
	assert.NotNil(t, lock)

	assert.Equal(t, lock.GetMaxLocks(), BitmapSize)

	if err := lock.Close(); err != nil {
		t.Fatalf("Error closing locks: %v", err)
	}
}

// Test that creating an SHM with 0 size fails
func TestCreateNewSHMZeroSize(t *testing.T) {
	_, err := CreateSHMLock("/test2", 0)
	assert.Error(t, err)
}

// Test that deallocating an unallocated lock errors
func TestDeallocateUnallocatedLockErrors(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		err := locks.DeallocateSemaphore(0)
		assert.Error(t, err)
	})
}

// Test that unlocking an unlocked lock fails
func TestUnlockingUnlockedLockFails(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		err := locks.UnlockSemaphore(0)
		assert.Error(t, err)
	})
}

// Test that locking and double-unlocking fails
func TestDoubleUnlockFails(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		err := locks.LockSemaphore(0)
		assert.NoError(t, err)

		err = locks.UnlockSemaphore(0)
		assert.NoError(t, err)

		err = locks.UnlockSemaphore(0)
		assert.Error(t, err)
	})
}

// Test the allocate - lock - unlock - deallocate cycle with a single lock
func TestLockLifecycleSingleLock(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		sem, err := locks.AllocateSemaphore()
		require.NoError(t, err)

		err = locks.LockSemaphore(sem)
		assert.NoError(t, err)

		err = locks.UnlockSemaphore(sem)
		assert.NoError(t, err)

		err = locks.DeallocateSemaphore(sem)
		assert.NoError(t, err)
	})
}

// Test that allocating two locks returns different locks
func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		sem1, err := locks.AllocateSemaphore()
		assert.NoError(t, err)

		sem2, err := locks.AllocateSemaphore()
		assert.NoError(t, err)

		assert.NotEqual(t, sem1, sem2)
	})
}

// Test that allocating all locks succeeds and all are unique
func TestAllocateAllLocksSucceeds(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		sems := make(map[uint32]bool)
		var i uint32
		for i = 0; i < numLocks; i++ {
			sem, err := locks.AllocateSemaphore()
			assert.NoError(t, err)

			// Ensure the allocated semaphore is unique
			_, ok := sems[sem]
			assert.False(t, ok)

			sems[sem] = true
		}
	})
}

// Test that allocating more than the given max fails
func TestAllocateTooManyLocksFails(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// Allocate all locks
		var i uint32
		for i = 0; i < numLocks; i++ {
			_, err := locks.AllocateSemaphore()
			assert.NoError(t, err)
		}

		// Try to allocate one more
		_, err := locks.AllocateSemaphore()
		assert.Error(t, err)
	})
}

// Test that allocating max locks, deallocating one, and then allocating again succeeds
func TestAllocateDeallocateCycle(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// Allocate all locks
		var i uint32
		for i = 0; i < numLocks; i++ {
			_, err := locks.AllocateSemaphore()
			assert.NoError(t, err)
		}

		// Now loop through again, deallocating and reallocating.
		// Each time we free 1 semaphore, allocate again, and make sure
		// we get the same semaphore back.
		var j uint32
		for j = 0; j < numLocks; j++ {
			err := locks.DeallocateSemaphore(j)
			assert.NoError(t, err)

			newSem, err := locks.AllocateSemaphore()
			assert.NoError(t, err)
			assert.Equal(t, j, newSem)
		}
	})
}

// Test that DeallocateAllSemaphores deallocates all semaphores
func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// Allocate a lock
		sem1, err := locks.AllocateSemaphore()
		assert.NoError(t, err)

		// Free all locks
		err = locks.DeallocateAllSemaphores()
		assert.NoError(t, err)

		// Allocate another lock; since everything was freed, we should
		// get the same first slot back
		sem2, err := locks.AllocateSemaphore()
		assert.NoError(t, err)

		assert.Equal(t, sem1, sem2)
	})
}

// Test that locks actually lock
func TestLockSemaphoreActuallyLocks(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// This entire test is very ugly - lots of sleeps to try and get
		// things to occur in the right order.
		// It also doesn't even exercise the multiprocess nature of the
		// locks.

		// Get the current time
		startTime := time.Now()

		// Start a goroutine to take the lock and then release it after
		// a second.
		go func() {
			err := locks.LockSemaphore(0)
			assert.NoError(t, err)

			time.Sleep(1 * time.Second)

			err = locks.UnlockSemaphore(0)
			assert.NoError(t, err)
		}()

		// Sleep for a quarter of a second to give the goroutine time
		// to kick off and grab the lock
		time.Sleep(250 * time.Millisecond)

		// Take the lock
		err := locks.LockSemaphore(0)
		assert.NoError(t, err)

		// Get the current time
		endTime := time.Now()

		// Verify that at least 1 second has passed since start
		duration := endTime.Sub(startTime)
		assert.True(t, duration.Seconds() > 1.0)
	})
}

// Test that locking and unlocking two semaphores succeeds.
// Ensures that runtime.LockOSThread() is doing its job.
func TestLockAndUnlockTwoSemaphores(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		err := locks.LockSemaphore(5)
		assert.NoError(t, err)

		err = locks.LockSemaphore(6)
		assert.NoError(t, err)

		err = locks.UnlockSemaphore(6)
		assert.NoError(t, err)

		// Now yield scheduling
		// to try and get us on another OS thread
		runtime.Gosched()

		// And unlock the last semaphore.
		// If we had migrated to a different OS thread, this would fail;
		// the runtime.LockOSThread() taken while locking should
		// guarantee we have not.
		err = locks.UnlockSemaphore(5)
		assert.NoError(t, err)
	})
}
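
// The package comment at the top of this file notes that the real purpose of
// these locks is multiprocess locking, which none of the tests above can
// exercise. Below is a minimal sketch of a cross-process check using the
// common "re-exec the test binary" pattern. It is an illustration, not part
// of the upstream suite: the SHM_LOCK_TEST_CHILD variable and child flow are
// assumptions, it would need "os/exec" added to the imports above, and
// TestMain would have to skip removing and re-creating the SHM segment when
// it sees the child marker (otherwise the child unlinks the parent's segment
// and the two processes end up locking different memory).
func TestLockSemaphoreAcrossProcesses(t *testing.T) {
	if os.Getenv("SHM_LOCK_TEST_CHILD") == "1" {
		// Child process: open the existing segment, hold lock 0 for a
		// while, then release it and exit.
		locks, err := OpenSHMLock(lockPath, numLocks)
		require.NoError(t, err)
		require.NoError(t, locks.LockSemaphore(0))
		time.Sleep(1 * time.Second)
		require.NoError(t, locks.UnlockSemaphore(0))
		require.NoError(t, locks.Close())
		return
	}

	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// Parent process: re-exec this one test in a child.
		cmd := exec.Command(os.Args[0], "-test.run", "^TestLockSemaphoreAcrossProcesses$")
		cmd.Env = append(os.Environ(), "SHM_LOCK_TEST_CHILD=1")
		require.NoError(t, cmd.Start())

		// As with TestLockSemaphoreActuallyLocks, this is timing-based
		// and therefore racy: give the child time to start up and take
		// the lock, then verify that acquiring it here blocks until
		// the child releases it.
		time.Sleep(500 * time.Millisecond)
		startTime := time.Now()
		require.NoError(t, locks.LockSemaphore(0))
		assert.True(t, time.Since(startTime) > 250*time.Millisecond)
		require.NoError(t, locks.UnlockSemaphore(0))

		require.NoError(t, cmd.Wait())
	})
}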