github.com/llvm-mirror/llgo@v0.0.0-20190322182713-bf6f0a60fce1/third_party/gofrontend/libgo/runtime/mem.c

/* Defining _XOPEN_SOURCE hides the declaration of madvise() on Solaris <
   11 and the MADV_DONTNEED definition on IRIX 6.5.  */
#undef _XOPEN_SOURCE

#include <errno.h>
#include <fcntl.h>	/* open() and O_RDONLY for the USE_DEV_ZERO fallback below */
#include <unistd.h>

#include "runtime.h"
#include "arch.h"
#include "malloc.h"

#ifndef MAP_ANON
#ifdef MAP_ANONYMOUS
#define MAP_ANON MAP_ANONYMOUS
#else
#define USE_DEV_ZERO
#define MAP_ANON 0
#endif
#endif

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

#ifdef USE_DEV_ZERO
static int dev_zero = -1;
#endif

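// addrspace_free reports whether the n bytes of address space starting
// at v appear to be unmapped.  Where mincore() is available it probes
// one page at a time; otherwise it conservatively reports the range as
// free.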
static int32
addrspace_free(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unused)))
{
#ifdef HAVE_MINCORE
	size_t page_size = getpagesize();
	int32 errval;
	uintptr chunk;
	uintptr off;

	// NOTE: vec must be just 1 byte long here.
	// Mincore returns ENOMEM if any of the pages are unmapped,
	// but we want to know that all of the pages are unmapped.
	// To make these the same, we can only ask about one page
	// at a time. See golang.org/issue/7476.
	static byte vec[1];

	errno = 0;
	for(off = 0; off < n; off += chunk) {
		chunk = page_size * sizeof vec;
		if(chunk > (n - off))
			chunk = n - off;
		errval = mincore((char*)v + off, chunk, (void*)vec);
		// ENOMEM means unmapped, which is what we want.
		// Anything else we assume means the pages are mapped.
		if(errval == 0 || errno != ENOMEM)
			return 0;
	}
#endif
	return 1;
}

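// mmap_fixed tries to map n bytes at the exact address v.  It first maps
// without MAP_FIXED; if the kernel ignores the hint but the requested
// range still looks free, it drops the stray mapping and retries with
// MAP_FIXED.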
static void *
mmap_fixed(byte *v, uintptr n, int32 prot, int32 flags, int32 fd, uint32 offset)
{
	void *p;

	p = runtime_mmap((void *)v, n, prot, flags, fd, offset);
	if(p != v && addrspace_free(v, n)) {
		// On some systems, mmap ignores v without
		// MAP_FIXED, so retry if the address space is free.
		if(p != MAP_FAILED)
			runtime_munmap(p, n);
		p = runtime_mmap((void *)v, n, prot, flags|MAP_FIXED, fd, offset);
	}
	return p;
}

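// SysAlloc obtains n bytes of zeroed memory from the operating system
// and adds n to *stat.  It returns nil on failure, except for errno
// values that indicate a configuration problem, which abort the process
// with a diagnostic.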
void*
runtime_SysAlloc(uintptr n, uint64 *stat)
{
	void *p;
	int fd = -1;

#ifdef USE_DEV_ZERO
	if (dev_zero == -1) {
		dev_zero = open("/dev/zero", O_RDONLY);
		if (dev_zero < 0) {
			runtime_printf("open /dev/zero: errno=%d\n", errno);
			exit(2);
		}
	}
	fd = dev_zero;
#endif

	p = runtime_mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		if(errno == EACCES) {
			runtime_printf("runtime: mmap: access denied\n");
			runtime_printf("if you're running SELinux, enable execmem for this process.\n");
			exit(2);
		}
		if(errno == EAGAIN) {
			runtime_printf("runtime: mmap: too much locked memory (check 'ulimit -l').\n");
			runtime_exit(2);
		}
		return nil;
	}
	runtime_xadd64(stat, n);
	return p;
}

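// SysUnused advises the kernel that the range [v, v+n) is not needed for
// now, so its physical pages may be reclaimed.  The range stays mapped;
// where MADV_DONTNEED is unavailable this is a no-op.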
void
runtime_SysUnused(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unused)))
{
#ifdef MADV_DONTNEED
	runtime_madvise(v, n, MADV_DONTNEED);
#endif
}

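// SysUsed marks the range as in use again.  Nothing needs to be done
// here: after MADV_DONTNEED the kernel repopulates pages automatically
// on the next access.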
void
runtime_SysUsed(void *v, uintptr n)
{
	USED(v);
	USED(n);
}

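// SysFree returns the range to the operating system and subtracts n
// from *stat.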
void
runtime_SysFree(void *v, uintptr n, uint64 *stat)
{
	runtime_xadd64(stat, -(uint64)n);
	runtime_munmap(v, n);
}

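// SysFault remaps the range with PROT_NONE so that any further access
// to it faults.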
void
runtime_SysFault(void *v, uintptr n)
{
	int fd = -1;

#ifdef USE_DEV_ZERO
	if (dev_zero == -1) {
		dev_zero = open("/dev/zero", O_RDONLY);
		if (dev_zero < 0) {
			runtime_printf("open /dev/zero: errno=%d\n", errno);
			exit(2);
		}
	}
	fd = dev_zero;
#endif

	runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, fd, 0);
}

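// SysReserve reserves n bytes of address space at v without committing
// memory to it.  On return *reserved reports whether the range is
// actually backed by a PROT_NONE mapping (true) or only assumed to be
// available and checked again in SysMap (false, the large-reservation
// case on 64-bit systems).  It returns nil if the address space is not
// available.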
void*
runtime_SysReserve(void *v, uintptr n, bool *reserved)
{
	int fd = -1;
	void *p;

#ifdef USE_DEV_ZERO
	if (dev_zero == -1) {
		dev_zero = open("/dev/zero", O_RDONLY);
		if (dev_zero < 0) {
			runtime_printf("open /dev/zero: errno=%d\n", errno);
			exit(2);
		}
	}
	fd = dev_zero;
#endif

	// On 64-bit, people with ulimit -v set complain if we reserve too
	// much address space.  Instead, assume that the reservation is okay
	// if we can reserve at least 64K and check the assumption in SysMap.
	// Only user-mode Linux (UML) rejects these requests.
	if(sizeof(void*) == 8 && (n >> 16) > 1LLU<<16) {
		p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, fd, 0);
		if (p != v) {
			// mmap_fixed can return MAP_FAILED; only unmap a real mapping.
			if (p != MAP_FAILED)
				runtime_munmap(p, 64<<10);
			return nil;
		}
		runtime_munmap(p, 64<<10);
		*reserved = false;
		return v;
	}

	// Use the MAP_NORESERVE mmap() flag here because typically most of
	// this reservation will never be used. It does not make sense to
	// reserve a huge amount of unneeded swap space. This is important on
	// systems which do not overcommit memory by default.
	p = runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_NORESERVE, fd, 0);
	if(p == MAP_FAILED)
		return nil;
	*reserved = true;
	return p;
}

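// SysMap commits n bytes at v previously obtained from SysReserve,
// making them readable and writable, and adds n to *stat.  If the
// reservation was only assumed (reserved == false), the address is
// re-requested here and a conflict aborts the program.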
void
runtime_SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
	void *p;
	int fd = -1;

	runtime_xadd64(stat, n);

#ifdef USE_DEV_ZERO
	if (dev_zero == -1) {
		dev_zero = open("/dev/zero", O_RDONLY);
		if (dev_zero < 0) {
			runtime_printf("open /dev/zero: errno=%d\n", errno);
			exit(2);
		}
	}
	fd = dev_zero;
#endif

	// On 64-bit, we don't actually have v reserved, so tread carefully.
	if(!reserved) {
		p = mmap_fixed(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, fd, 0);
		if(p == MAP_FAILED && errno == ENOMEM)
			runtime_throw("runtime: out of memory");
		if(p != v) {
			runtime_printf("runtime: address space conflict: map(%p) = %p\n", v, p);
			runtime_throw("runtime: address space conflict");
		}
		return;
	}

	p = runtime_mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, fd, 0);
	if(p == MAP_FAILED && errno == ENOMEM)
		runtime_throw("runtime: out of memory");
	if(p != v)
		runtime_throw("runtime: cannot map pages in arena address space");
}
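
/* Illustrative sketch of how the heap allocator typically drives these
   primitives.  The addresses, sizes, and the mstats.heap_sys argument
   below are placeholders chosen for illustration, not calls taken from
   this file; the real callers live elsewhere in the runtime.

	bool reserved;
	void *base = runtime_SysReserve(hint, arena_size, &reserved);
	runtime_SysMap(base, chunk, reserved, &mstats.heap_sys);  // commit a chunk
	runtime_SysUnused(base, chunk);                  // pages may be reclaimed
	runtime_SysUsed(base, chunk);                    // about to touch them again
	runtime_SysFree(base, chunk, &mstats.heap_sys);  // return the memory
*/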