github.com/xushiwei/go@v0.0.0-20130601165731-2b9d83f45bc9/src/pkg/runtime/malloc.goc (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // See malloc.h for overview.
     6  //
     7  // TODO(rsc): double-check stats.
     8  
     9  package runtime
    10  #include "runtime.h"
    11  #include "arch_GOARCH.h"
    12  #include "malloc.h"
    13  #include "type.h"
    14  #include "typekind.h"
    15  #include "race.h"
    16  
// The global heap descriptor; allocated and initialized in runtime·mallocinit.
MHeap *runtime·mheap;

// NOTE(review): not written in this file — presumably enables internal
// consistency checks elsewhere in the runtime; confirm before relying on it.
int32	runtime·checking;

extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go

// Memory profiling rate: sample roughly one allocation per this many
// bytes (the Go-visible runtime.MemProfileRate).
extern volatile intgo runtime·MemProfileRate;
    24  
// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
//
// flag is a bitmask of FlagNo* values (FlagNoGC, FlagNoPointers,
// FlagNoProfiling).  dogc != 0 permits this call to yield to a waiting
// collector and to trigger a collection afterwards; zeroed != 0
// requests zeroed memory.
void*
runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
{
	int32 sizeclass;
	intgo rate;
	MCache *c;
	uintptr npages;
	MSpan *s;
	void *v;

	// Let a pending GC run first — but only from a goroutine stack
	// (not g0) and when no locks are held.
	if(runtime·gcwaiting && g != m->g0 && m->locks == 0 && dogc)
		runtime·gosched();
	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;	// re-entry guard, cleared before returning
	if(size == 0)
		size = 1;

	if(DebugTypeAtBlockEnd)
		size += sizeof(uintptr);	// reserve a trailing word for the type (see settype)

	c = m->mcache;
	c->local_nmalloc++;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		// The size is rounded up to its size class.
		sizeclass = runtime·SizeToClass(size);
		size = runtime·class_to_size[sizeclass];
		v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
		if(v == nil)
			runtime·throw("out of memory");
		c->local_alloc += size;
		c->local_total_alloc += size;
		c->local_by_size[sizeclass].nmalloc++;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap, rounded up to whole pages.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime·MHeap_Alloc(runtime·mheap, npages, 0, 1, zeroed);
		if(s == nil)
			runtime·throw("out of memory");
		size = npages<<PageShift;
		c->local_alloc += size;
		c->local_total_alloc += size;
		v = (void*)(s->start << PageShift);

		// setup for mark sweep
		runtime·markspan(v, 0, 0, true);
	}

	if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) {
		// purge cache stats to prevent overflow (32-bit counters only)
		runtime·lock(runtime·mheap);
		runtime·purgecachedstats(c);
		runtime·unlock(runtime·mheap);
	}

	if(!(flag & FlagNoGC))
		runtime·markallocated(v, size, (flag&FlagNoPointers) != 0);

	if(DebugTypeAtBlockEnd)
		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0;	// clear the reserved type word

	m->mallocing = 0;

	// Memory profiling: sample roughly every MemProfileRate bytes.
	if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
		if(size >= rate)
			goto profile;	// allocations >= rate are always sampled
		if(m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			// If you change this, also change allocmcache.
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = runtime·fastrand1() % (2*rate);
		profile:
			runtime·setblockspecial(v, true);	// so free() knows to call MProf_Free
			runtime·MProf_Malloc(v, size);
		}
	}

	if(dogc && mstats.heap_alloc >= mstats.next_gc)
		runtime·gc(0);

	if(raceenabled) {
		runtime·racemalloc(v, size, m->racepc);
		m->racepc = nil;
	}
	return v;
}
   121  
   122  void*
   123  runtime·malloc(uintptr size)
   124  {
   125  	return runtime·mallocgc(size, 0, 0, 1);
   126  }
   127  
// Free the object whose base pointer is v.
// v must be the start of an allocated block (as reported by
// runtime·mlookup); v == nil is a no-op.
void
runtime·free(void *v)
{
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uint32 prof;
	uintptr size;

	if(v == nil)
		return;
	
	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;	// re-entry guard, cleared at the end

	if(!runtime·mlookup(v, nil, nil, &s)) {
		runtime·printf("free %p: not an allocated block\n", v);
		runtime·throw("free runtime·mlookup");
	}
	prof = runtime·blockspecial(v);	// was this block sampled by the memory profiler?

	if(raceenabled)
		runtime·racefree(v);

	// Find size class for v.
	sizeclass = s->sizeclass;
	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime·markfreed(v, size);
		runtime·unmarkspan(v, 1<<PageShift);
		runtime·MHeap_Free(runtime·mheap, s, 1);
	} else {
		// Small object.
		size = runtime·class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		runtime·markfreed(v, size);
		c->local_by_size[sizeclass].nfree++;
		runtime·MCache_Free(c, v, sizeclass, size);
	}
	c->local_nfree++;
	c->local_alloc -= size;
	if(prof)
		runtime·MProf_Free(v, size);
	m->mallocing = 0;
}
   187  
// mlookup resolves an arbitrary heap pointer v to the block that
// contains it.  Returns 1 on success, filling (when non-nil) *base
// with the block's start, *size with its size, and *sp with its span.
// Returns 0 if v does not point into an allocated block.
int32
runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	uintptr n, i;
	byte *p;
	MSpan *s;

	m->mcache->local_nlookup++;
	if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
		// purge cache stats to prevent overflow (32-bit counters only)
		runtime·lock(runtime·mheap);
		runtime·purgecachedstats(m->mcache);
		runtime·unlock(runtime·mheap);
	}

	s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		// Not in any span: report 0 after sanity-checking the pointer.
		runtime·checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	if((byte*)v >= (byte*)s->limit) {
		// pointers past the last block do not count as pointers.
		return 0;
	}

	n = s->elemsize;
	if(base) {
		// Round v down to the start of its object within the span.
		i = ((byte*)v - p)/n;
		*base = p + i*n;
	}
	if(size)
		*size = n;

	return 1;
}
   240  
// allocmcache allocates and zero-initializes a per-thread MCache
// from the heap's fixalloc pool and seeds its profiling sample
// countdown.  The caller becomes the owner of the returned cache.
MCache*
runtime·allocmcache(void)
{
	intgo rate;
	MCache *c;

	runtime·lock(runtime·mheap);
	c = runtime·FixAlloc_Alloc(&runtime·mheap->cachealloc);
	mstats.mcache_inuse = runtime·mheap->cachealloc.inuse;
	mstats.mcache_sys = runtime·mheap->cachealloc.sys;
	runtime·unlock(runtime·mheap);
	runtime·memclr((byte*)c, sizeof(*c));

	// Set first allocation sample size.
	// Keep in sync with the resampling code in mallocgc.
	rate = runtime·MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime·fastrand1() % (2*rate);

	return c;
}
   263  
// freemcache returns MCache c to the heap's fixalloc pool,
// first flushing its free lists and cached statistics.
void
runtime·freemcache(MCache *c)
{
	runtime·MCache_ReleaseAll(c);
	runtime·lock(runtime·mheap);
	runtime·purgecachedstats(c);	// stats transfer requires the heap lock
	runtime·FixAlloc_Free(&runtime·mheap->cachealloc, c);
	runtime·unlock(runtime·mheap);
}
   273  
// purgecachedstats flushes c's per-cache statistics counters into
// the global mstats and resets them to zero.
void
runtime·purgecachedstats(MCache *c)
{
	// Protected by either heap or GC lock.
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.heap_objects += c->local_objects;
	c->local_objects = 0;
	mstats.nmalloc += c->local_nmalloc;
	c->local_nmalloc = 0;
	mstats.nfree += c->local_nfree;
	c->local_nfree = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	mstats.alloc += c->local_alloc;
	c->local_alloc= 0;
	mstats.total_alloc += c->local_total_alloc;
	c->local_total_alloc= 0;
}
   293  
// Exported so Go code can check that its MemStats layout matches
// the C MStats definition.
uintptr runtime·sizeof_C_MStats = sizeof(MStats);

// Maximum arena size addressable on a 32-bit build: 2 GB.
#define MaxArena32 (2U<<30)
   297  
// mallocinit initializes the memory allocator: it allocates the
// MHeap structure, reserves address space for the heap arena and its
// GC bitmap (a single big reservation on 64-bit, a smaller one after
// the data segment on 32-bit), initializes the heap, and creates the
// bootstrap mcache for the current M.  Ends with a malloc/free
// self-test.  Called once at startup.
void
runtime·mallocinit(void)
{
	byte *p;
	uintptr arena_size, bitmap_size;
	extern byte end[];	// end of the data segment, provided by the linker
	byte *want;
	uintptr limit;

	p = nil;
	arena_size = 0;
	bitmap_size = 0;

	// for 64-bit build
	USED(p);
	USED(arena_size);
	USED(bitmap_size);

	if((runtime·mheap = runtime·SysAlloc(sizeof(*runtime·mheap))) == nil)
		runtime·throw("runtime: cannot allocate heap metadata");

	runtime·InitSizes();

	// limit = runtime·memlimit();
	// See https://code.google.com/p/go/issues/detail?id=5049
	// TODO(rsc): Fix after 1.1.
	limit = 0;

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found.  The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 128 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x000000c000000000 if possible.
		// Allocating a 128 GB region takes away 37 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00c0 for us to choose.  Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from 
		// ff (likely a common byte) as possible. An earlier attempt to use 0x11f8 
		// caused out of memory errors on OS X during thread allocations.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism
		arena_size = MaxMem;
		bitmap_size = arena_size / (sizeof(void*)*8/4);	// 4 bitmap bits per word
		p = runtime·SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
	}
	if (p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		if(limit > 0 && arena_size+bitmap_size > limit) {
			// Shrink to fit the OS memory limit, keeping the
			// bitmap/arena ratio (1 bitmap byte covers 8 heap bytes).
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
		}

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement.  If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer.  Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
		p = runtime·SysReserve(want, bitmap_size + arena_size);
		if(p == nil)
			runtime·throw("runtime: cannot reserve arena virtual address space");
		if((uintptr)p & (((uintptr)1<<PageShift)-1))
			runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size);
	}
	if((uintptr)p & (((uintptr)1<<PageShift)-1))
		runtime·throw("runtime: SysReserve returned unaligned address");

	// Bitmap first, then the arena proper.
	runtime·mheap->bitmap = p;
	runtime·mheap->arena_start = p + bitmap_size;
	runtime·mheap->arena_used = runtime·mheap->arena_start;
	runtime·mheap->arena_end = runtime·mheap->arena_start + arena_size;

	// Initialize the rest of the allocator.	
	runtime·MHeap_Init(runtime·mheap, runtime·SysAlloc);
	m->mcache = runtime·allocmcache();

	// See if it works.
	runtime·free(runtime·malloc(1));
}
   412  
// MHeap_SysAlloc obtains n bytes of arena memory for the heap,
// extending the existing reservation when possible.  On 32-bit,
// once the reservation is exhausted it accepts OS-chosen addresses,
// but only if they fall inside the range covered by the GC bitmap.
// Returns nil on failure.
void*
runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p;

	if(n > h->arena_end - h->arena_used) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;
		uintptr needed;

		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
		// Round wanted arena size to a multiple of 256MB.
		needed = (needed + (256<<20) - 1) & ~((256<<20)-1);
		new_end = h->arena_end + needed;
		if(new_end <= h->arena_start + MaxArena32) {
			p = runtime·SysReserve(h->arena_end, new_end - h->arena_end);
			if(p == h->arena_end)
				h->arena_end = new_end;
			// If the reservation landed elsewhere, fall through to
			// the OS-chosen-address path below.
		}
	}
	if(n <= h->arena_end - h->arena_used) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime·SysMap(p, n);
		h->arena_used += n;
		runtime·MHeap_MapBits(h);	// grow the GC bitmap to cover the new memory
		if(raceenabled)
			runtime·racemapshadow(p, n);
		return p;
	}
	
	// If using 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p = runtime·SysAlloc(n);
	if(p == nil)
		return nil;

	if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
		// Outside the bitmap's reach: give it back and fail.
		runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime·SysFree(p, n);
		return nil;
	}

	if(p+n > h->arena_used) {
		// Extend the used (and if necessary, end) watermark.
		h->arena_used = p+n;
		if(h->arena_used > h->arena_end)
			h->arena_end = h->arena_used;
		runtime·MHeap_MapBits(h);
		if(raceenabled)
			runtime·racemapshadow(p, n);
	}
	
	return p;
}
   474  
// Guards all mutation of per-span type tables below.
static Lock settype_lock;

// settype_flush transfers the (pointer, type word) pairs buffered in
// mp->settype_buf into per-span type records for the garbage
// collector, then resets the buffer.
// If sysalloc is true, backing tables are obtained from SysAlloc
// rather than mallocgc (NOTE(review): presumably to avoid re-entering
// the allocator — confirm against callers).
void
runtime·settype_flush(M *mp, bool sysalloc)
{
	uintptr *buf, *endbuf;
	uintptr size, ofs, j, t;
	uintptr ntypes, nbytes2, nbytes3;
	uintptr *data2;
	byte *data3;
	bool sysalloc3;
	void *v;
	uintptr typ, p;
	MSpan *s;

	buf = mp->settype_buf;
	endbuf = buf + mp->settype_bufsize;

	runtime·lock(&settype_lock);
	while(buf < endbuf) {
		// Each buffered entry is a (pointer, type word) pair.
		v = (void*)*buf;
		*buf = 0;
		buf++;
		typ = *buf;
		buf++;

		// (Manually inlined copy of runtime·MHeap_Lookup)
		p = (uintptr)v>>PageShift;
		if(sizeof(void*) == 8)
			p -= (uintptr)runtime·mheap->arena_start >> PageShift;
		s = runtime·mheap->map[p];

		if(s->sizeclass == 0) {
			// Large object: the span holds a single value, so
			// one type word describes the whole span.
			s->types.compression = MTypes_Single;
			s->types.data = typ;
			continue;
		}

		size = s->elemsize;
		ofs = ((uintptr)v - (s->start<<PageShift)) / size;	// object index within the span

		switch(s->types.compression) {
		case MTypes_Empty:
			// First type for this span: start in Bytes form —
			// a table of 8 type words followed by one byte
			// (table index) per object.
			ntypes = (s->npages << PageShift) / size;
			nbytes3 = 8*sizeof(uintptr) + 1*ntypes;

			if(!sysalloc) {
				data3 = runtime·mallocgc(nbytes3, FlagNoProfiling|FlagNoPointers, 0, 1);
			} else {
				data3 = runtime·SysAlloc(nbytes3);
				if(data3 == nil)
					runtime·throw("runtime: cannot allocate memory");
				if(0) runtime·printf("settype(0->3): SysAlloc(%x) --> %p\n", (uint32)nbytes3, data3);
			}

			s->types.compression = MTypes_Bytes;
			s->types.sysalloc = sysalloc;
			s->types.data = (uintptr)data3;

			((uintptr*)data3)[1] = typ;	// slot 0 stays 0, meaning "no type"
			data3[8*sizeof(uintptr) + ofs] = 1;
			break;

		case MTypes_Words:
			// One type word per object: store directly.
			((uintptr*)s->types.data)[ofs] = typ;
			break;

		case MTypes_Bytes:
			// Look for typ in the table, or claim the first free slot.
			data3 = (byte*)s->types.data;
			for(j=1; j<8; j++) {
				if(((uintptr*)data3)[j] == typ) {
					break;
				}
				if(((uintptr*)data3)[j] == 0) {
					((uintptr*)data3)[j] = typ;
					break;
				}
			}
			if(j < 8) {
				data3[8*sizeof(uintptr) + ofs] = j;
			} else {
				// Table full (more than 7 distinct types):
				// expand to Words form, one word per object.
				ntypes = (s->npages << PageShift) / size;
				nbytes2 = ntypes * sizeof(uintptr);

				if(!sysalloc) {
					data2 = runtime·mallocgc(nbytes2, FlagNoProfiling|FlagNoPointers, 0, 1);
				} else {
					data2 = runtime·SysAlloc(nbytes2);
					if(data2 == nil)
						runtime·throw("runtime: cannot allocate memory");
					if(0) runtime·printf("settype.(3->2): SysAlloc(%x) --> %p\n", (uint32)nbytes2, data2);
				}

				sysalloc3 = s->types.sysalloc;

				s->types.compression = MTypes_Words;
				s->types.sysalloc = sysalloc;
				s->types.data = (uintptr)data2;

				// Move the contents of data3 to data2. Then deallocate data3.
				for(j=0; j<ntypes; j++) {
					t = data3[8*sizeof(uintptr) + j];	// index byte...
					t = ((uintptr*)data3)[t];		// ...resolved through the table
					data2[j] = t;
				}
				if(sysalloc3) {
					nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
					if(0) runtime·printf("settype.(3->2): SysFree(%p,%x)\n", data3, (uint32)nbytes3);
					runtime·SysFree(data3, nbytes3);
				}

				data2[ofs] = typ;
			}
			break;
		}
	}
	runtime·unlock(&settype_lock);

	mp->settype_bufsize = 0;
}
   595  
// It is forbidden to use this function if it is possible that
// explicit deallocation via calling runtime·free(v) may happen.
//
// settype records that the object at v has type word t, buffering
// the pair in the current M and flushing to the per-span tables
// when the buffer fills.
void
runtime·settype(void *v, uintptr t)
{
	M *mp;
	uintptr *buf;
	uintptr i;
	MSpan *s;

	if(t == 0)
		runtime·throw("settype: zero type");

	mp = m;
	buf = mp->settype_buf;
	i = mp->settype_bufsize;
	// Append the (pointer, type) pair to the buffer.
	buf[i+0] = (uintptr)v;
	buf[i+1] = t;
	i += 2;
	mp->settype_bufsize = i;

	if(i == nelem(mp->settype_buf)) {
		runtime·settype_flush(mp, false);
	}

	if(DebugTypeAtBlockEnd) {
		// Debug mode: mallocgc reserved a trailing word in each
		// block; store the type there too.
		s = runtime·MHeap_Lookup(runtime·mheap, v);
		*(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t;
	}
}
   626  
// settype_sysfree releases span s's type table if it was obtained
// from SysAlloc; tables obtained from mallocgc are left to the
// garbage collector.
void
runtime·settype_sysfree(MSpan *s)
{
	uintptr ntypes, nbytes;

	if(!s->types.sysalloc)
		return;

	nbytes = (uintptr)-1;	// sentinel: nothing to free

	switch (s->types.compression) {
	case MTypes_Words:
		// One type word per object.
		ntypes = (s->npages << PageShift) / s->elemsize;
		nbytes = ntypes * sizeof(uintptr);
		break;
	case MTypes_Bytes:
		// 8-word type table plus one index byte per object.
		ntypes = (s->npages << PageShift) / s->elemsize;
		nbytes = 8*sizeof(uintptr) + 1*ntypes;
		break;
	}

	if(nbytes != (uintptr)-1) {
		if(0) runtime·printf("settype: SysFree(%p,%x)\n", (void*)s->types.data, (uint32)nbytes);
		runtime·SysFree((void*)s->types.data, nbytes);
	}
}
   653  
// gettype returns the type word previously recorded (via settype)
// for the object containing v, or 0 if v is not in the heap or no
// type information is stored.
uintptr
runtime·gettype(void *v)
{
	MSpan *s;
	uintptr t, ofs;
	byte *data;

	s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
	if(s != nil) {
		t = 0;
		switch(s->types.compression) {
		case MTypes_Empty:
			break;
		case MTypes_Single:
			// Whole span described by one type word.
			t = s->types.data;
			break;
		case MTypes_Words:
			// One type word per object.
			ofs = (uintptr)v - (s->start<<PageShift);
			t = ((uintptr*)s->types.data)[ofs/s->elemsize];
			break;
		case MTypes_Bytes:
			// Per-object index byte into an 8-word type table.
			ofs = (uintptr)v - (s->start<<PageShift);
			data = (byte*)s->types.data;
			t = data[8*sizeof(uintptr) + ofs/s->elemsize];
			t = ((uintptr*)data)[t];
			break;
		default:
			runtime·throw("runtime·gettype: invalid compression kind");
		}
		if(0) {
			// Debug tracing, disabled.
			runtime·lock(&settype_lock);
			runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
			runtime·unlock(&settype_lock);
		}
		return t;
	}
	return 0;
}
   692  
   693  // Runtime stubs.
   694  
   695  void*
   696  runtime·mal(uintptr n)
   697  {
   698  	return runtime·mallocgc(n, 0, 1, 1);
   699  }
   700  
// runtime·new implements the Go "new" builtin: allocate one zeroed
// object of type typ and write its address to the goc result slot ret.
// NOTE(review): textflag 7 in the Plan 9 toolchain — presumably
// NOSPLIT (and related) flags; confirm against the toolchain docs.
#pragma textflag 7
void
runtime·new(Type *typ, uint8 *ret)
{
	uint32 flag;

	if(raceenabled)
		m->racepc = runtime·getcallerpc(&typ);

	if(typ->size == 0) {
		// All 0-length allocations use this pointer.
		// The language does not require the allocations to
		// have distinct values.
		ret = (uint8*)&runtime·zerobase;
	} else {
		// Skip pointer scanning for pointer-free types.
		flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
		ret = runtime·mallocgc(typ->size, flag, 1, 1);

		if(UseSpanType && !flag) {
			if(false) {
				runtime·printf("new %S: %p\n", *typ->string, ret);
			}
			// Record the allocation's type for the garbage collector.
			runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject);
		}
	}

	FLUSH(&ret);	// goc: write ret back into the Go caller's frame
}
   729  
// same as runtime·new, but callable from C: returns the pointer
// instead of writing it to a goc result slot.
void*
runtime·cnew(Type *typ)
{
	uint32 flag;
	void *ret;

	if(raceenabled)
		m->racepc = runtime·getcallerpc(&typ);

	if(typ->size == 0) {
		// All 0-length allocations use this pointer.
		// The language does not require the allocations to
		// have distinct values.
		ret = (uint8*)&runtime·zerobase;
	} else {
		// Skip pointer scanning for pointer-free types.
		flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
		ret = runtime·mallocgc(typ->size, flag, 1, 1);

		if(UseSpanType && !flag) {
			if(false) {
				runtime·printf("new %S: %p\n", *typ->string, ret);
			}
			// Record the allocation's type for the garbage collector.
			runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject);
		}
	}

	return ret;
}
   759  
// GC implements the Go function runtime.GC: run a garbage collection.
// The argument requests a forced collection — see runtime·gc in
// mgc0.c (NOTE(review): confirm the flag's exact meaning there).
func GC() {
	runtime·gc(1);
}
   763  
// SetFinalizer implements runtime.SetFinalizer: associate finalizer
// (a func taking obj's type) with the block obj points to.  Both
// arguments arrive as empty interfaces (Eface).  Invalid arguments
// print a diagnostic and throw.
func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	FuncType *ft;
	int32 i;
	uintptr nret;
	Type *t;

	// obj must be a non-nil pointer to the start of an allocated block.
	if(obj.type == nil) {
		runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if(obj.type->kind != KindPtr) {
		runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
		goto throw;
	}
	if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) {
		runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	nret = 0;
	if(finalizer.type != nil) {
		// finalizer must be a non-variadic func taking exactly one
		// argument of obj's type.
		if(finalizer.type->kind != KindFunc)
			goto badfunc;
		ft = (FuncType*)finalizer.type;
		if(ft->dotdotdot || ft->in.len != 1 || *(Type**)ft->in.array != obj.type)
			goto badfunc;

		// compute size needed for return parameters
		// (each result aligned to its own alignment; total rounded
		// up to a multiple of the pointer size)
		for(i=0; i<ft->out.len; i++) {
			t = ((Type**)ft->out.array)[i];
			nret = (nret + t->align - 1) & ~(t->align - 1);
			nret += t->size;
		}
		nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
	}
	
	if(!runtime·addfinalizer(obj.data, finalizer.data, nret)) {
		runtime·printf("runtime.SetFinalizer: finalizer already set\n");
		goto throw;
	}
	return;

badfunc:
	runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
throw:
	runtime·throw("runtime.SetFinalizer");
}