github.com/axw/llgo@v0.0.0-20160805011314-95b5fe4dca20/third_party/gofrontend/libbacktrace/mmap.c

/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */

#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */
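/* Note that malloc is not async-signal-safe, so the code below builds
   its own allocator directly on mmap plus a simple free list.  */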

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
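/* Some older BSD-derived systems only provide the MAP_ANON spelling;
   the fallback above lets the code use MAP_ANONYMOUS throughout.  */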

/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
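
/* The list header is stored in the freed block itself, so the free
   list needs no memory of its own; a block smaller than the header
   simply cannot be tracked.  */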

/* Free memory allocated by backtrace_alloc.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  */
  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      struct backtrace_freelist_struct *p;

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}
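
/* Blocks pushed onto the free list are never coalesced with their
   neighbors, so the list can fragment; as the comment above says, this
   allocator does not have to be perfect.  */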

/* Allocate memory like malloc.  */

void *
backtrace_alloc (struct backtrace_state *state,
                 size_t size, backtrace_error_callback error_callback,
                 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if ((*pp)->size >= size)
            {
              struct backtrace_freelist_struct *p;

              p = *pp;
              *pp = p->next;

              /* Round for alignment; we assume that no type we care about
                 is more than 8 bytes.  */
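              /* For example, a 13-byte request rounds up to 16:
                 (13 + 7) & ~(size_t) 7 == 16.  */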
              size = (size + 7) & ~ (size_t) 7;
              if (size < p->size)
                backtrace_free_locked (state, (char *) p + size,
                                       p->size - size);

              ret = (void *) p;

              break;
            }
        }

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
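      /* For example, with 4096-byte pages a request of 5000 bytes maps
         8192 bytes; the unused tail is returned to the free list just
         below.  */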
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      /* mmap reports failure by returning MAP_FAILED rather than
         NULL.  */
      if (page == MAP_FAILED)
        error_callback (data, "mmap", errno);
      else
        {
          size = (size + 7) & ~ (size_t) 7;
          if (size < asksize)
            backtrace_free (state, (char *) page + size, asksize - size,
                            error_callback, data);

          ret = page;
        }
    }

  return ret;
}
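
/* The free-list search in backtrace_alloc is first fit: the first
   block that is large enough is taken, and any unused tail, whether of
   a recycled block or of a freshly mapped page, is handed back to the
   free list.  */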

/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
                backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
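  /* The threshold is a fixed 64 kB (16 * 4096); it does not depend on
     the page size that getpagesize reports below.  */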
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      if (((uintptr_t) addr & (pagesize - 1)) == 0
          && (size & (pagesize - 1)) == 0)
        {
          /* If munmap fails for some reason, just add the block to
             the freelist.  */
          if (munmap (addr, size) == 0)
            return;
        }
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }
}

/* Grow VEC by SIZE bytes.  */
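/* Within VEC, vec->size counts the bytes already handed out to the
   caller and vec->alc the spare bytes still available after them; on
   success the returned pointer addresses SIZE fresh bytes at the end
   of the vector.  */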

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
                       backtrace_error_callback error_callback,
                       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

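      /* Growth policy: the first allocation reserves room for 16
         objects of this size; after that the required total is
         doubled, capped at one page while small and rounded up to
         whole pages once it exceeds a page.  */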
      pagesize = getpagesize ();
      alc = vec->size + size;
      if (vec->size == 0)
        alc = 16 * size;
      else if (alc < pagesize)
        {
          alc *= 2;
          if (alc > pagesize)
            alc = pagesize;
        }
      else
        {
          alc *= 2;
          alc = (alc + pagesize - 1) & ~ (pagesize - 1);
        }
      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
        return NULL;
      if (vec->base != NULL)
        {
          memcpy (base, vec->base, vec->size);
          backtrace_free (state, vec->base, vec->size + vec->alc,
                          error_callback, data);
        }
      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}

/* Finish the current allocation on VEC.  */
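/* The finished bytes stay in place; vec->base is advanced past them so
   that subsequent calls to backtrace_vector_grow start a fresh object
   in the remaining space.  */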

void *
backtrace_vector_finish (
  struct backtrace_state *state ATTRIBUTE_UNUSED,
  struct backtrace_vector *vec,
  backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
  void *data ATTRIBUTE_UNUSED)
{
  void *ret;

  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}

/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
                          struct backtrace_vector *vec,
                          backtrace_error_callback error_callback,
                          void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
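  /* For example, if 13 bytes are in use then aligned is 16 and the 3
     padding bytes come out of the spare space, so the freed region
     starts on an 8-byte boundary.  */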
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;

  backtrace_free (state, (char *) vec->base + aligned, alc,
                  error_callback, data);
  vec->alc = 0;
  return 1;
}