github.com/hanks177/podman/v4@v4.1.3-0.20220613032544-16d90015bc83/libpod/lock/shm/shm_lock_test.go

//go:build linux
// +build linux

package shm

import (
	"fmt"
	"os"
	"runtime"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// All tests here are in the same process, which somewhat limits their utility.
// The big intent of this package is multiprocess locking, which is really hard
// to test without actually having multiple processes...
// We can at least verify that the locks work within the local process.

var (
	// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps
	numLocks = 4 * BitmapSize
)

const lockPath = "/libpod_test"

// We need a test main to ensure that the SHM is created before the tests run
func TestMain(m *testing.M) {
	// Remove prior /dev/shm/libpod_test
	os.RemoveAll("/dev/shm" + lockPath)

	shmLock, err := CreateSHMLock(lockPath, numLocks)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err)
		os.Exit(-1)
	}

	// Close the SHM - every subsequent test will reopen it
	if err := shmLock.Close(); err != nil {
		fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err)
		os.Exit(-1)
	}

	exitCode := m.Run()

	// We need to remove the SHM segment to clean up after ourselves
	os.RemoveAll("/dev/shm" + lockPath)

	os.Exit(exitCode)
}

func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
	locks, err := OpenSHMLock(lockPath, numLocks)
	if err != nil {
		t.Fatalf("Error opening locks: %v", err)
	}
	defer func() {
		// Deallocate all locks
		if err := locks.DeallocateAllSemaphores(); err != nil {
			t.Fatalf("Error deallocating semaphores: %v", err)
		}

		if err := locks.Close(); err != nil {
			t.Fatalf("Error closing locks: %v", err)
		}
	}()

	success := t.Run("locks", func(t *testing.T) {
		testFunc(t, locks)
	})
	if !success {
		t.Fail()
	}
}

// Test that creating an SHM with a bad size rounds up to a good size
func TestCreateNewSHMBadSizeRoundsUp(t *testing.T) {
	// Remove prior /dev/shm/test1
	os.RemoveAll("/dev/shm/test1")
	// Odd number, not a power of 2, should never be a word size on a system
	lock, err := CreateSHMLock("/test1", 7)
	assert.NoError(t, err)
	assert.NotNil(t, lock)

	assert.Equal(t, lock.GetMaxLocks(), BitmapSize)

	if err := lock.Close(); err != nil {
		t.Fatalf("Error closing locks: %v", err)
	}
}

// Test that creating an SHM with 0 size fails
func TestCreateNewSHMZeroSize(t *testing.T) {
	_, err := CreateSHMLock("/test2", 0)
	assert.Error(t, err)
}

// Test that deallocating an unallocated lock errors
func TestDeallocateUnallocatedLockErrors(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		err := locks.DeallocateSemaphore(0)
		assert.Error(t, err)
	})
}

// Test that unlocking an unlocked lock fails
func TestUnlockingUnlockedLockFails(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		err := locks.UnlockSemaphore(0)
		assert.Error(t, err)
	})
}

// Test that locking and then double-unlocking fails
func TestDoubleUnlockFails(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		err := locks.LockSemaphore(0)
		assert.NoError(t, err)

		err = locks.UnlockSemaphore(0)
		assert.NoError(t, err)

		err = locks.UnlockSemaphore(0)
		assert.Error(t, err)
	})
}
// Test the allocate - lock - unlock - deallocate cycle with a single lock
func TestLockLifecycleSingleLock(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		sem, err := locks.AllocateSemaphore()
		require.NoError(t, err)

		err = locks.LockSemaphore(sem)
		assert.NoError(t, err)

		err = locks.UnlockSemaphore(sem)
		assert.NoError(t, err)

		err = locks.DeallocateSemaphore(sem)
		assert.NoError(t, err)
	})
}

// Test that allocating two locks returns different locks
func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		sem1, err := locks.AllocateSemaphore()
		assert.NoError(t, err)

		sem2, err := locks.AllocateSemaphore()
		assert.NoError(t, err)

		assert.NotEqual(t, sem1, sem2)
	})
}

// Test that allocating all locks succeeds and that all are unique
func TestAllocateAllLocksSucceeds(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		sems := make(map[uint32]bool)
		var i uint32
		for i = 0; i < numLocks; i++ {
			sem, err := locks.AllocateSemaphore()
			assert.NoError(t, err)

			// Ensure the allocated semaphore is unique
			_, ok := sems[sem]
			assert.False(t, ok)

			sems[sem] = true
		}
	})
}

// Test that allocating more than the given max fails
func TestAllocateTooManyLocksFails(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// Allocate all locks
		var i uint32
		for i = 0; i < numLocks; i++ {
			_, err := locks.AllocateSemaphore()
			assert.NoError(t, err)
		}

		// Try to allocate one more
		_, err := locks.AllocateSemaphore()
		assert.Error(t, err)
	})
}

// Test that allocating max locks, deallocating one, and then allocating again succeeds
func TestAllocateDeallocateCycle(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// Allocate all locks
		var i uint32
		for i = 0; i < numLocks; i++ {
			_, err := locks.AllocateSemaphore()
			assert.NoError(t, err)
		}

		// Now loop through again, deallocating and reallocating.
		// Each time we free 1 semaphore, allocate again, and make sure
		// we get the same semaphore back.
		var j uint32
		for j = 0; j < numLocks; j++ {
			err := locks.DeallocateSemaphore(j)
			assert.NoError(t, err)

			newSem, err := locks.AllocateSemaphore()
			assert.NoError(t, err)
			assert.Equal(t, j, newSem)
		}
	})
}

// Test that DeallocateAllSemaphores deallocates all semaphores
func TestDeallocateAllSemaphoresDeallocatesAll(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// Allocate a lock
		locks1, err := locks.AllocateSemaphore()
		assert.NoError(t, err)

		// Free all locks
		err = locks.DeallocateAllSemaphores()
		assert.NoError(t, err)

		// Allocate another lock
		locks2, err := locks.AllocateSemaphore()
		assert.NoError(t, err)

		assert.Equal(t, locks1, locks2)
	})
}

// Test that locks actually lock
func TestLockSemaphoreActuallyLocks(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// This entire test is very ugly - lots of sleeps to try and get
		// things to occur in the right order.
		// It also doesn't even exercise the multiprocess nature of the
		// locks.

		// Get the current time
		startTime := time.Now()

		// Start a goroutine to take the lock and then release it after
		// a second.
		go func() {
			err := locks.LockSemaphore(0)
			assert.NoError(t, err)

			time.Sleep(1 * time.Second)

			err = locks.UnlockSemaphore(0)
			assert.NoError(t, err)
		}()

		// Sleep for a quarter of a second to give the goroutine time
		// to kick off and grab the lock
		time.Sleep(250 * time.Millisecond)

		// Take the lock
		err := locks.LockSemaphore(0)
		assert.NoError(t, err)

		// Get the current time
		endTime := time.Now()

		// Verify that at least 1 second has passed since start
		duration := endTime.Sub(startTime)
		assert.True(t, duration.Seconds() > 1.0)
	})
}
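// The timing-based test above is, as its own comments admit, fragile. The
// sketch below is our illustrative addition, not part of the upstream suite:
// it shows a channel-based variant in which the goroutine signals once it
// holds the lock and again just before releasing it, so no elapsed-time
// measurement is needed to prove that LockSemaphore blocked until release.
func TestLockSemaphoreBlocksUntilReleased(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		held := make(chan struct{})
		released := make(chan struct{})

		go func() {
			err := locks.LockSemaphore(0)
			assert.NoError(t, err)

			// Tell the main goroutine we now hold the lock.
			close(held)

			time.Sleep(100 * time.Millisecond)

			// Signal strictly before unlocking, so the main goroutine
			// can detect whether it acquired the lock too early.
			close(released)

			err = locks.UnlockSemaphore(0)
			assert.NoError(t, err)
		}()

		// Wait until the goroutine definitely holds the lock, then contend.
		<-held
		err := locks.LockSemaphore(0)
		assert.NoError(t, err)

		// Because released is closed before the goroutine unlocks, it must
		// already be closed by the time our LockSemaphore call returns.
		select {
		case <-released:
			// Expected: we only got the lock after it was released.
		default:
			t.Error("acquired the lock while the goroutine still held it")
		}

		err = locks.UnlockSemaphore(0)
		assert.NoError(t, err)
	})
}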
// Test that locking and unlocking two semaphores succeeds.
// Ensures that runtime.LockOSThread() is doing its job.
func TestLockAndUnlockTwoSemaphore(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		err := locks.LockSemaphore(5)
		assert.NoError(t, err)

		err = locks.LockSemaphore(6)
		assert.NoError(t, err)

		err = locks.UnlockSemaphore(6)
		assert.NoError(t, err)

		// Now yield scheduling
		// To try and get us on another OS thread
		runtime.Gosched()

		// And unlock the last semaphore
		// If we are in a different OS thread, this should fail.
		// However, runtime.LockOSThread() should guarantee we are not
		err = locks.UnlockSemaphore(5)
		assert.NoError(t, err)
	})
}
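// The comment at the top of this file notes that the real intent of the
// package is multiprocess locking, which these in-process tests cannot
// exercise. As a partial stand-in, the sketch below (our illustrative
// addition, not part of the upstream suite) opens a second, independent
// handle to the same SHM segment, the way a second process would, and
// checks that allocation state is shared between the two handles.
func TestTwoHandlesShareSegmentState(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// Open the same segment again, as another process would.
		locks2, err := OpenSHMLock(lockPath, numLocks)
		require.NoError(t, err)
		defer func() {
			assert.NoError(t, locks2.Close())
		}()

		// Allocate through the first handle...
		sem, err := locks.AllocateSemaphore()
		require.NoError(t, err)

		// ...and verify the second handle sees the same state:
		// deallocating that slot through it succeeds, and a second
		// deallocation through the first handle then fails because
		// the slot is already free.
		assert.NoError(t, locks2.DeallocateSemaphore(sem))
		assert.Error(t, locks.DeallocateSemaphore(sem))
	})
}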