github.com/AbhinandanKurakure/podman/v3@v3.4.10/libpod/lock/shm/shm_lock.c

#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "shm_lock.h"

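// Overview: setup_lock_shm() creates and initializes the segment,
// open_lock_shm() attaches to an existing one, allocate_semaphore() and
// allocate_given_semaphore() hand out individual locks, lock_semaphore() and
// unlock_semaphore() take and release them, deallocate_semaphore() and
// deallocate_all_semaphores() free them, and close_lock_shm() unmaps the
// segment.
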
// Compute the size of the SHM struct
static size_t compute_shm_size(uint32_t num_bitmaps) {
  return sizeof(shm_struct_t) + (num_bitmaps * sizeof(lock_group_t));
}

// Take the given mutex.
// Handles exceptional conditions, including a mutex locked by a process that
// died holding it.
// Returns 0 on success, or positive errno on failure.
static int take_mutex(pthread_mutex_t *mutex) {
  int ret_code;

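  // Retry while the lock attempt fails with EAGAIN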
  do {
    ret_code = pthread_mutex_lock(mutex);
  } while(ret_code == EAGAIN);

  if (ret_code == EOWNERDEAD) {
    // The previous owner of the mutex died while holding it
    // Take it for ourselves
    ret_code = pthread_mutex_consistent(mutex);
    if (ret_code != 0) {
      // Someone else may have gotten here first and marked the state consistent
      // However, the mutex could also be invalid.
      // Fail here instead of looping back to trying to lock the mutex.
      return ret_code;
    }
  } else if (ret_code != 0) {
    return ret_code;
  }

  return 0;
}

// Release the given mutex.
// Returns 0 on success, or positive errno on failure.
static int release_mutex(pthread_mutex_t *mutex) {
  int ret_code;

  do {
    ret_code = pthread_mutex_unlock(mutex);
  } while(ret_code == EAGAIN);

  if (ret_code != 0) {
    return ret_code;
  }

  return 0;
}

// Set up an SHM segment holding locks for libpod.
// num_locks must not be 0.
// Path is the path to the SHM segment. It must begin with a single / and
// contain no other / characters, and be at most 255 characters including the
// terminating null byte.
// Returns a valid pointer on success or NULL on error.
// If an error occurs, negative ERRNO values will be written to error_code.
shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
  int shm_fd, i, j, ret_code;
  uint32_t num_bitmaps;
  size_t shm_size;
  shm_struct_t *shm;
  pthread_mutexattr_t attr;

  // If error_code doesn't point to anything, we can't reasonably return errors
  // So fail immediately
  if (error_code == NULL) {
    return NULL;
  }

  // We need a nonzero number of locks
  if (num_locks == 0) {
    *error_code = -1 * EINVAL;
    return NULL;
  }

  if (path == NULL) {
    *error_code = -1 * EINVAL;
    return NULL;
  }

  // Calculate the number of bitmaps required
  num_bitmaps = num_locks / BITMAP_SIZE;
  if (num_locks % BITMAP_SIZE != 0) {
    // The actual number given is not an even multiple of our bitmap size
    // So round up
    num_bitmaps += 1;
  }

  // Calculate size of the shm segment
  shm_size = compute_shm_size(num_bitmaps);

  // Create a new SHM segment for us
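  // O_EXCL ensures we fail if a segment with this name already exists, rather
  // than silently reusing it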
  shm_fd = shm_open(path, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (shm_fd < 0) {
    *error_code = -1 * errno;
    return NULL;
  }

  // Increase its size to what we need
  ret_code = ftruncate(shm_fd, shm_size);
  if (ret_code < 0) {
    *error_code = -1 * errno;
    goto CLEANUP_UNLINK;
  }

  // Map the shared memory in
  shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
  if (shm == MAP_FAILED) {
    *error_code = -1 * errno;
    goto CLEANUP_UNLINK;
  }

  // We have successfully mapped the memory, now initialize the region
  shm->magic = MAGIC;
  shm->unused = 0;
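  // num_locks is rounded up to a whole number of bitmaps, so it may be larger
  // than the count the caller requested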
  shm->num_locks = num_bitmaps * BITMAP_SIZE;
  shm->num_bitmaps = num_bitmaps;

  // Create an initializer for our pthread mutexes
  ret_code = pthread_mutexattr_init(&attr);
  if (ret_code != 0) {
    *error_code = -1 * ret_code;
    goto CLEANUP_UNMAP;
  }

  // Set mutexes to pshared - multiprocess-safe
  ret_code = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
  if (ret_code != 0) {
    *error_code = -1 * ret_code;
    goto CLEANUP_FREEATTR;
  }

  // Set mutexes to robust - if a process dies while holding a mutex, we'll get
  // a special error code on the next attempt to lock it.
  // This should prevent panicking processes from leaving the state unusable.
  ret_code = pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
  if (ret_code != 0) {
    *error_code = -1 * ret_code;
    goto CLEANUP_FREEATTR;
  }

  // Initialize the mutex that protects the bitmaps using the mutex attributes
  ret_code = pthread_mutex_init(&(shm->segment_lock), &attr);
  if (ret_code != 0) {
    *error_code = -1 * ret_code;
    goto CLEANUP_FREEATTR;
  }

  // Initialize all bitmaps to 0 initially
  // And initialize all semaphores they use
  for (i = 0; i < num_bitmaps; i++) {
    shm->locks[i].bitmap = 0;
    for (j = 0; j < BITMAP_SIZE; j++) {
      // Initialize each mutex
      ret_code = pthread_mutex_init(&(shm->locks[i].locks[j]), &attr);
      if (ret_code != 0) {
        *error_code = -1 * ret_code;
        goto CLEANUP_FREEATTR;
      }
    }
  }

  // Close the file descriptor, we're done with it
  // Ignore errors, it's ok if we leak a single FD and this should only run once
  close(shm_fd);

  // Destroy the pthread initializer attribute.
  // Again, ignore errors, this will only run once and we might leak a tiny bit
  // of memory at worst.
  pthread_mutexattr_destroy(&attr);

  return shm;

  // Cleanup after an error
 CLEANUP_FREEATTR:
  pthread_mutexattr_destroy(&attr);
 CLEANUP_UNMAP:
  munmap(shm, shm_size);
 CLEANUP_UNLINK:
  close(shm_fd);
  shm_unlink(path);
  return NULL;
}

// Open an existing SHM segment holding libpod locks.
// num_locks is the number of locks that will be configured in the SHM segment.
// num_locks cannot be 0.
// Path is the path to the SHM segment. It must begin with a single / and
// contain no other / characters, and be at most 255 characters including the
// terminating null byte.
// Returns a valid pointer on success or NULL on error.
// If an error occurs, negative ERRNO values will be written to error_code.
// ERANGE is returned for a mismatch between num_locks and the number of locks
// available in the SHM lock struct.
shm_struct_t *open_lock_shm(char *path, uint32_t num_locks, int *error_code) {
  int shm_fd;
  shm_struct_t *shm;
  size_t shm_size;
  uint32_t num_bitmaps;

  if (error_code == NULL) {
    return NULL;
  }

  // We need a nonzero number of locks
  if (num_locks == 0) {
    *error_code = -1 * EINVAL;
    return NULL;
  }

  if (path == NULL) {
    *error_code = -1 * EINVAL;
    return NULL;
  }

  // Calculate the number of bitmaps required
  num_bitmaps = num_locks / BITMAP_SIZE;
  if (num_locks % BITMAP_SIZE != 0) {
    num_bitmaps += 1;
  }

  // Calculate size of the shm segment
  shm_size = compute_shm_size(num_bitmaps);

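  // Open the existing segment; unlike setup_lock_shm(), do not create it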
  shm_fd = shm_open(path, O_RDWR, 0600);
  if (shm_fd < 0) {
    *error_code = -1 * errno;
    return NULL;
  }

  // Map the shared memory in
  shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
  if (shm == MAP_FAILED) {
    *error_code = -1 * errno;
  }

  // Ignore errors, it's ok if we leak a single FD since this only runs once
  close(shm_fd);

  // Check if we successfully mmap'd
  if (shm == MAP_FAILED) {
    return NULL;
  }

  // Need to check the SHM to see if it's actually our locks
  if (shm->magic != MAGIC) {
    *error_code = -1 * EBADF;
    goto CLEANUP;
  }
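  // Make sure the segment holds the same (rounded-up) number of locks the
  // caller expects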
  if (shm->num_locks != (num_bitmaps * BITMAP_SIZE)) {
    *error_code = -1 * ERANGE;
    goto CLEANUP;
  }

  return shm;

 CLEANUP:
  munmap(shm, shm_size);
  return NULL;
}

// Close an open SHM lock struct, unmapping the backing memory.
// The given shm_struct_t will be rendered unusable as a result.
// On success, 0 is returned. On failure, negative ERRNO values are returned.
int32_t close_lock_shm(shm_struct_t *shm) {
  int ret_code;
  size_t shm_size;

  // We can't unmap null...
  if (shm == NULL) {
    return -1 * EINVAL;
  }

  shm_size = compute_shm_size(shm->num_bitmaps);

  ret_code = munmap(shm, shm_size);

  if (ret_code != 0) {
    return -1 * errno;
  }

  return 0;
}

// Allocate the first available semaphore
// Returns a non-negative integer guaranteed to be less than UINT32_MAX on
// success, or negative errno values on failure
// On success, the returned integer is the number of the semaphore allocated
int64_t allocate_semaphore(shm_struct_t *shm) {
  int ret_code, i;
  bitmap_t test_map;
  int64_t sem_number, num_within_bitmap;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  // Lock the mutex controlling access to our shared memory
  ret_code = take_mutex(&(shm->segment_lock));
  if (ret_code != 0) {
    return -1 * ret_code;
  }

  // Loop through our bitmaps to search for one that is not full
  for (i = 0; i < shm->num_bitmaps; i++) {
    if (shm->locks[i].bitmap != 0xFFFFFFFF) {
      test_map = 0x1;
      num_within_bitmap = 0;
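      // Scan this bitmap one bit at a time until a clear (free) bit is found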
      while (test_map != 0) {
        if ((test_map & shm->locks[i].bitmap) == 0) {
          // Compute the number of the semaphore we are allocating
          sem_number = (BITMAP_SIZE * i) + num_within_bitmap;
          // OR in the bitmap
          shm->locks[i].bitmap = shm->locks[i].bitmap | test_map;

          // Clear the mutex
          ret_code = release_mutex(&(shm->segment_lock));
          if (ret_code != 0) {
            return -1 * ret_code;
          }

          // Return the semaphore we've allocated
          return sem_number;
        }
        test_map = test_map << 1;
        num_within_bitmap++;
      }
      // We should never fall through this loop
      // TODO maybe an assert() here to panic if we do?
    }
  }

  // Clear the mutex
  ret_code = release_mutex(&(shm->segment_lock));
  if (ret_code != 0) {
    return -1 * ret_code;
  }

  // All bitmaps are full
  // We have no available semaphores, report allocation failure
  return -1 * ENOSPC;
}

// Allocate the semaphore with the given ID.
// Returns an error if the semaphore with this ID does not exist, or has already
// been allocated.
// Returns 0 on success, or negative errno values on failure.
int32_t allocate_given_semaphore(shm_struct_t *shm, uint32_t sem_index) {
  int bitmap_index, index_in_bitmap, ret_code;
  bitmap_t test_map;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  // Check if the lock index is valid
  if (sem_index >= shm->num_locks) {
    return -1 * EINVAL;
  }

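  // Locate the bitmap holding this semaphore and its bit position within it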
  bitmap_index = sem_index / BITMAP_SIZE;
  index_in_bitmap = sem_index % BITMAP_SIZE;

  // This should never happen if the sem_index test above succeeded, but better
  // safe than sorry
  if (bitmap_index >= shm->num_bitmaps) {
    return -1 * EFAULT;
  }

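  // Build a mask with only this semaphore's bit set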
  test_map = 0x1 << index_in_bitmap;

  // Lock the mutex controlling access to our shared memory
  ret_code = take_mutex(&(shm->segment_lock));
  if (ret_code != 0) {
    return -1 * ret_code;
  }

  // Check if the semaphore is allocated
  if ((test_map & shm->locks[bitmap_index].bitmap) != 0) {
    ret_code = release_mutex(&(shm->segment_lock));
    if (ret_code != 0) {
      return -1 * ret_code;
    }

    return -1 * EEXIST;
  }

  // The semaphore is not allocated, allocate it
  shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap | test_map;

  ret_code = release_mutex(&(shm->segment_lock));
  if (ret_code != 0) {
    return -1 * ret_code;
  }

  return 0;
}

// Deallocate a given semaphore
// Returns 0 on success, negative ERRNO values on failure
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
  bitmap_t test_map;
  int bitmap_index, index_in_bitmap, ret_code;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  // Check if the lock index is valid
  if (sem_index >= shm->num_locks) {
    return -1 * EINVAL;
  }

  bitmap_index = sem_index / BITMAP_SIZE;
  index_in_bitmap = sem_index % BITMAP_SIZE;

  // This should never happen if the sem_index test above succeeded, but better
  // safe than sorry
  if (bitmap_index >= shm->num_bitmaps) {
    return -1 * EFAULT;
  }

  test_map = 0x1 << index_in_bitmap;

  // Lock the mutex controlling access to our shared memory
  ret_code = take_mutex(&(shm->segment_lock));
  if (ret_code != 0) {
    return -1 * ret_code;
  }

  // Check if the semaphore is allocated
  if ((test_map & shm->locks[bitmap_index].bitmap) == 0) {
    ret_code = release_mutex(&(shm->segment_lock));
    if (ret_code != 0) {
      return -1 * ret_code;
    }

    return -1 * ENOENT;
  }

  // The semaphore is allocated, clear it
  // Invert the bitmask we used to test to clear the bit
  test_map = ~test_map;
  shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap & test_map;

  ret_code = release_mutex(&(shm->segment_lock));
  if (ret_code != 0) {
    return -1 * ret_code;
  }

  return 0;
}

// Deallocate all semaphores unconditionally.
// Returns 0 on success, negative ERRNO values on failure.
int32_t deallocate_all_semaphores(shm_struct_t *shm) {
  int ret_code;
  uint i;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  // Lock the mutex controlling access to our shared memory
  ret_code = take_mutex(&(shm->segment_lock));
  if (ret_code != 0) {
    return -1 * ret_code;
  }

  // Iterate through all bitmaps and reset to unused
  for (i = 0; i < shm->num_bitmaps; i++) {
    shm->locks[i].bitmap = 0;
  }

  // Unlock the allocation control mutex
  ret_code = release_mutex(&(shm->segment_lock));
  if (ret_code != 0) {
    return -1 * ret_code;
  }

  return 0;
}

// Lock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully lock to check status (and
// subsequently realize they have been removed).
// Returns 0 on success, negative ERRNO values on failure
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
  int bitmap_index, index_in_bitmap;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  if (sem_index >= shm->num_locks) {
    return -1 * EINVAL;
  }

  bitmap_index = sem_index / BITMAP_SIZE;
  index_in_bitmap = sem_index % BITMAP_SIZE;

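  // take_mutex() returns a positive errno on failure; negate it so the caller
  // sees 0 or a negative ERRNO value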
  return -1 * take_mutex(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
}

// Unlock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully lock to check status (and
// subsequently realize they have been removed).
// Returns 0 on success, negative ERRNO values on failure
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
  int bitmap_index, index_in_bitmap;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  if (sem_index >= shm->num_locks) {
    return -1 * EINVAL;
  }

  bitmap_index = sem_index / BITMAP_SIZE;
  index_in_bitmap = sem_index % BITMAP_SIZE;

  return -1 * release_mutex(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
}