github.com/varialus/godfly@v0.0.0-20130904042352-1934f9f095ab/src/pkg/runtime/malloc.goc

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "type.h"
#include "typekind.h"
#include "race.h"
#include "stack.h"
#include "../../cmd/ld/textflag.h"

// Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
#pragma dataflag NOPTR
MHeap runtime·mheap;

int32	runtime·checking;

extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go

extern volatile intgo runtime·MemProfileRate;

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
// If the block will be freed with runtime·free(), typ must be 0.
void*
runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
{
	int32 sizeclass;
	intgo rate;
	MCache *c;
	MCacheList *l;
	uintptr npages;
	MSpan *s;
	MLink *v;

	if(size == 0) {
		// All 0-length allocations use this pointer.
		// The language does not require the allocations to
		// have distinct values.
		return &runtime·zerobase;
	}
	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	// Disable preemption during settype_flush.
	// We can not use m->mallocing for this, because settype_flush calls mallocgc.
	m->locks++;
	m->mallocing = 1;

	if(DebugTypeAtBlockEnd)
		size += sizeof(uintptr);

	c = m->mcache;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		// Inlined version of SizeToClass().
		if(size <= 1024-8)
			sizeclass = runtime·size_to_class8[(size+7)>>3];
		else
			sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
		size = runtime·class_to_size[sizeclass];
		l = &c->list[sizeclass];
		if(l->list == nil)
			runtime·MCache_Refill(c, sizeclass);
		v = l->list;
		l->list = v->next;
		l->nlist--;
		if(!(flag & FlagNoZero)) {
			v->next = nil;
			// block is zeroed iff second word is zero ...
			if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
				runtime·memclr((byte*)v, size);
		}
		c->local_cachealloc += size;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, !(flag & FlagNoZero));
		if(s == nil)
			runtime·throw("out of memory");
		s->limit = (byte*)(s->start<<PageShift) + size;
		size = npages<<PageShift;
		v = (void*)(s->start << PageShift);

		// setup for mark sweep
		runtime·markspan(v, 0, 0, true);
	}

	if(!(flag & FlagNoGC))
		runtime·markallocated(v, size, (flag&FlagNoScan) != 0);

	if(DebugTypeAtBlockEnd)
		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;

	// TODO: save type even if FlagNoScan?  Potentially expensive but might help
	// heap profiling/tracing.
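	// Record the block's type in the per-M settype buffer.  The buffer
	// is flushed to the owning span (settype_flush) once it fills, and
	// only after mallocing is cleared below, because the flush itself
	// calls mallocgc.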
	if(UseSpanType && !(flag & FlagNoScan) && typ != 0) {
		uintptr *buf, i;

		buf = m->settype_buf;
		i = m->settype_bufsize;
		buf[i++] = (uintptr)v;
		buf[i++] = typ;
		m->settype_bufsize = i;
	}

	m->mallocing = 0;
	if(UseSpanType && !(flag & FlagNoScan) && typ != 0 && m->settype_bufsize == nelem(m->settype_buf))
		runtime·settype_flush(m);
	m->locks--;
	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
		g->stackguard0 = StackPreempt;

	if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
		if(size >= rate)
			goto profile;
		if(m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			// If you change this, also change allocmcache.
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = runtime·fastrand1() % (2*rate);
		profile:
			runtime·setblockspecial(v, true);
			runtime·MProf_Malloc(v, size);
		}
	}

	if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
		runtime·gc(0);

	if(raceenabled)
		runtime·racemalloc(v, size);
	return v;
}

void*
runtime·malloc(uintptr size)
{
	return runtime·mallocgc(size, 0, FlagNoInvokeGC);
}

// Free the object whose base pointer is v.
void
runtime·free(void *v)
{
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uint32 prof;
	uintptr size;

	if(v == nil)
		return;

	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime·mlookup(v, nil, nil, &s)) {
		runtime·printf("free %p: not an allocated block\n", v);
		runtime·throw("free runtime·mlookup");
	}
	prof = runtime·blockspecial(v);

	if(raceenabled)
		runtime·racefree(v);

	// Find size class for v.
	sizeclass = s->sizeclass;
	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime·markfreed(v, size);
		runtime·unmarkspan(v, 1<<PageShift);
		runtime·MHeap_Free(&runtime·mheap, s, 1);
		c->local_nlargefree++;
		c->local_largefree += size;
	} else {
		// Small object.
		size = runtime·class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
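		// (The 0xfeed... pattern written into the second word above
		// makes the "zeroed iff second word is zero" check in mallocgc
		// re-zero this block when it is next handed out, unless the
		// caller passes FlagNoZero.)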
		runtime·markfreed(v, size);
		c->local_nsmallfree[sizeclass]++;
		runtime·MCache_Free(c, v, sizeclass, size);
	}
	if(prof)
		runtime·MProf_Free(v, size);
	m->mallocing = 0;
}

int32
runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	uintptr n, i;
	byte *p;
	MSpan *s;

	m->mcache->local_nlookup++;
	if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
		// purge cache stats to prevent overflow
		runtime·lock(&runtime·mheap);
		runtime·purgecachedstats(m->mcache);
		runtime·unlock(&runtime·mheap);
	}

	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		runtime·checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	n = s->elemsize;
	if(base) {
		i = ((byte*)v - p)/n;
		*base = p + i*n;
	}
	if(size)
		*size = n;

	return 1;
}

MCache*
runtime·allocmcache(void)
{
	intgo rate;
	MCache *c;

	runtime·lock(&runtime·mheap);
	c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
	mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
	mstats.mcache_sys = runtime·mheap.cachealloc.sys;
	runtime·unlock(&runtime·mheap);
	runtime·memclr((byte*)c, sizeof(*c));

	// Set first allocation sample size.
	rate = runtime·MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime·fastrand1() % (2*rate);

	return c;
}

void
runtime·freemcache(MCache *c)
{
	runtime·MCache_ReleaseAll(c);
	runtime·lock(&runtime·mheap);
	runtime·purgecachedstats(c);
	runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
	runtime·unlock(&runtime·mheap);
}

void
runtime·purgecachedstats(MCache *c)
{
	MHeap *h;
	int32 i;

	// Protected by either heap or GC lock.
	h = &runtime·mheap;
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	h->largefree += c->local_largefree;
	c->local_largefree = 0;
	h->nlargefree += c->local_nlargefree;
	c->local_nlargefree = 0;
	for(i=0; i<nelem(c->local_nsmallfree); i++) {
		h->nsmallfree[i] += c->local_nsmallfree[i];
		c->local_nsmallfree[i] = 0;
	}
}

uintptr runtime·sizeof_C_MStats = sizeof(MStats);

#define MaxArena32 (2U<<30)

void
runtime·mallocinit(void)
{
	byte *p;
	uintptr arena_size, bitmap_size, spans_size;
	extern byte end[];
	byte *want;
	uintptr limit;
	uint64 i;

	p = nil;
	arena_size = 0;
	bitmap_size = 0;
	spans_size = 0;

	// for 64-bit build
	USED(p);
	USED(arena_size);
	USED(bitmap_size);
	USED(spans_size);

	runtime·InitSizes();

	// limit = runtime·memlimit();
	// See https://code.google.com/p/go/issues/detail?id=5049
	// TODO(rsc): Fix after 1.1.
	limit = 0;

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found.  The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
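	// The single reservation made below is laid out as
	//	[spans lookup table | GC bitmap | arena]
	// matching the runtime·mheap.spans/bitmap/arena_start assignments
	// at the end of this function.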
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 128 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 128 GB region takes away 37 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00c0 for us to choose.  Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00.  None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible.  If that fails, we try other 0xXXc0
		// addresses.  An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations.  0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		arena_size = MaxMem;
		bitmap_size = arena_size / (sizeof(void*)*8/4);
		spans_size = arena_size / PageSize * sizeof(runtime·mheap.spans[0]);
		spans_size = ROUND(spans_size, PageSize);
		for(i = 0; i <= 0x7f; i++) {
			p = (void*)(i<<40 | 0x00c0ULL<<32);
			p = runtime·SysReserve(p, bitmap_size + spans_size + arena_size);
			if(p != nil)
				break;
		}
	}
	if (p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		spans_size = MaxArena32 / PageSize * sizeof(runtime·mheap.spans[0]);
		if(limit > 0 && arena_size+bitmap_size+spans_size > limit) {
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
			spans_size = arena_size / PageSize * sizeof(runtime·mheap.spans[0]);
		}
		spans_size = ROUND(spans_size, PageSize);
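		// On 32-bit, sizeof(void*)*8/4 = 8, so bitmap_size works out
		// to 1/8 of the 2 GB MaxArena32: the 256 MB mentioned above.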
		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement.  If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer.  Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		want = (byte*)ROUND((uintptr)end + (1<<18), 1<<20);
		p = runtime·SysReserve(want, bitmap_size + spans_size + arena_size);
		if(p == nil)
			runtime·throw("runtime: cannot reserve arena virtual address space");
		if((uintptr)p & (((uintptr)1<<PageShift)-1))
			runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p,
				bitmap_size+spans_size+arena_size);
	}
	if((uintptr)p & (((uintptr)1<<PageShift)-1))
		runtime·throw("runtime: SysReserve returned unaligned address");

	runtime·mheap.spans = (MSpan**)p;
	runtime·mheap.bitmap = p + spans_size;
	runtime·mheap.arena_start = p + spans_size + bitmap_size;
	runtime·mheap.arena_used = runtime·mheap.arena_start;
	runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size;

	// Initialize the rest of the allocator.
	runtime·MHeap_Init(&runtime·mheap);
	m->mcache = runtime·allocmcache();

	// See if it works.
	runtime·free(runtime·malloc(1));
}

void*
runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p;

	if(n > h->arena_end - h->arena_used) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;
		uintptr needed;

		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
		needed = ROUND(needed, 256<<20);
		new_end = h->arena_end + needed;
		if(new_end <= h->arena_start + MaxArena32) {
			p = runtime·SysReserve(h->arena_end, new_end - h->arena_end);
			if(p == h->arena_end)
				h->arena_end = new_end;
		}
	}
	if(n <= h->arena_end - h->arena_used) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime·SysMap(p, n);
		h->arena_used += n;
		runtime·MHeap_MapBits(h);
		runtime·MHeap_MapSpans(h);
		if(raceenabled)
			runtime·racemapshadow(p, n);
		return p;
	}

	// If using 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p = runtime·SysAlloc(n);
	if(p == nil)
		return nil;

	if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
		runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime·SysFree(p, n);
		return nil;
	}

	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(h->arena_used > h->arena_end)
			h->arena_end = h->arena_used;
		runtime·MHeap_MapBits(h);
		runtime·MHeap_MapSpans(h);
		if(raceenabled)
			runtime·racemapshadow(p, n);
	}

	return p;
}

static struct
{
	Lock;
	byte*	pos;
	byte*	end;
} persistent;

enum
{
	PersistentAllocChunk	= 256<<10,
	PersistentAllocMaxBlock	= 64<<10,	// VM reservation granularity is 64K on windows
};
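// persistentalloc below is a simple bump allocator over SysAlloc'd
// chunks: small requests are carved out of the current 256K chunk, and
// requests of 64K or more bypass the chunk and go straight to SysAlloc.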
// Wrapper around SysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
void*
runtime·persistentalloc(uintptr size, uintptr align)
{
	byte *p;

	if(align != 0) {
		if(align&(align-1))
			runtime·throw("persistentalloc: align is not a power of 2");
		if(align > PageSize)
			runtime·throw("persistentalloc: align is too large");
	} else
		align = 8;
	if(size >= PersistentAllocMaxBlock)
		return runtime·SysAlloc(size);
	runtime·lock(&persistent);
	persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
	if(persistent.pos + size > persistent.end) {
		persistent.pos = runtime·SysAlloc(PersistentAllocChunk);
		if(persistent.pos == nil) {
			runtime·unlock(&persistent);
			runtime·throw("runtime: cannot allocate memory");
		}
		persistent.end = persistent.pos + PersistentAllocChunk;
	}
	p = persistent.pos;
	persistent.pos += size;
	runtime·unlock(&persistent);
	return p;
}

static Lock settype_lock;

void
runtime·settype_flush(M *mp)
{
	uintptr *buf, *endbuf;
	uintptr size, ofs, j, t;
	uintptr ntypes, nbytes2, nbytes3;
	uintptr *data2;
	byte *data3;
	void *v;
	uintptr typ, p;
	MSpan *s;

	buf = mp->settype_buf;
	endbuf = buf + mp->settype_bufsize;

	runtime·lock(&settype_lock);
	while(buf < endbuf) {
		v = (void*)*buf;
		*buf = 0;
		buf++;
		typ = *buf;
		buf++;

		// (Manually inlined copy of runtime·MHeap_Lookup)
		p = (uintptr)v>>PageShift;
		if(sizeof(void*) == 8)
			p -= (uintptr)runtime·mheap.arena_start >> PageShift;
		s = runtime·mheap.spans[p];

		if(s->sizeclass == 0) {
			s->types.compression = MTypes_Single;
			s->types.data = typ;
			continue;
		}

		size = s->elemsize;
		ofs = ((uintptr)v - (s->start<<PageShift)) / size;

		switch(s->types.compression) {
		case MTypes_Empty:
			ntypes = (s->npages << PageShift) / size;
			nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
			data3 = runtime·mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
			s->types.compression = MTypes_Bytes;
			s->types.data = (uintptr)data3;
			((uintptr*)data3)[1] = typ;
			data3[8*sizeof(uintptr) + ofs] = 1;
			break;

		case MTypes_Words:
			((uintptr*)s->types.data)[ofs] = typ;
			break;

		case MTypes_Bytes:
			data3 = (byte*)s->types.data;
			for(j=1; j<8; j++) {
				if(((uintptr*)data3)[j] == typ) {
					break;
				}
				if(((uintptr*)data3)[j] == 0) {
					((uintptr*)data3)[j] = typ;
					break;
				}
			}
			if(j < 8) {
				data3[8*sizeof(uintptr) + ofs] = j;
			} else {
				ntypes = (s->npages << PageShift) / size;
				nbytes2 = ntypes * sizeof(uintptr);
				data2 = runtime·mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
				s->types.compression = MTypes_Words;
				s->types.data = (uintptr)data2;
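				// The byte encoding's dictionary is full (at
				// most 7 distinct types per span), so fall
				// back to one type word per element.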

				// Move the contents of data3 to data2.  Then deallocate data3.
				for(j=0; j<ntypes; j++) {
					t = data3[8*sizeof(uintptr) + j];
					t = ((uintptr*)data3)[t];
					data2[j] = t;
				}
				data2[ofs] = typ;
			}
			break;
		}
	}
	runtime·unlock(&settype_lock);

	mp->settype_bufsize = 0;
}

uintptr
runtime·gettype(void *v)
{
	MSpan *s;
	uintptr t, ofs;
	byte *data;

	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
	if(s != nil) {
		t = 0;
		switch(s->types.compression) {
		case MTypes_Empty:
			break;
		case MTypes_Single:
			t = s->types.data;
			break;
		case MTypes_Words:
			ofs = (uintptr)v - (s->start<<PageShift);
			t = ((uintptr*)s->types.data)[ofs/s->elemsize];
			break;
		case MTypes_Bytes:
			ofs = (uintptr)v - (s->start<<PageShift);
			data = (byte*)s->types.data;
			t = data[8*sizeof(uintptr) + ofs/s->elemsize];
			t = ((uintptr*)data)[t];
			break;
		default:
			runtime·throw("runtime·gettype: invalid compression kind");
		}
		if(0) {
			runtime·lock(&settype_lock);
			runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
			runtime·unlock(&settype_lock);
		}
		return t;
	}
	return 0;
}

// Runtime stubs.

void*
runtime·mal(uintptr n)
{
	return runtime·mallocgc(n, 0, 0);
}

#pragma textflag NOSPLIT
void
runtime·new(Type *typ, uint8 *ret)
{
	ret = runtime·mallocgc(typ->size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
	FLUSH(&ret);
}

static void*
cnew(Type *typ, intgo n, int32 objtyp)
{
	if((objtyp&(PtrSize-1)) != objtyp)
		runtime·throw("runtime: invalid objtyp");
	if(n < 0 || (typ->size > 0 && n > MaxMem/typ->size))
		runtime·panicstring("runtime: allocation size out of range");
	return runtime·mallocgc(typ->size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
}

// same as runtime·new, but callable from C
void*
runtime·cnew(Type *typ)
{
	return cnew(typ, 1, TypeInfo_SingleObject);
}

void*
runtime·cnewarray(Type *typ, intgo n)
{
	return cnew(typ, n, TypeInfo_Array);
}

func GC() {
	runtime·gc(1);
}

func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	FuncType *ft;
	int32 i;
	uintptr nret;
	Type *t;
	Type *fint;
	PtrType *ot;
	Iface iface;

	if(obj.type == nil) {
		runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if(obj.type->kind != KindPtr) {
		runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
		goto throw;
	}
	if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) {
		runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	nret = 0;
	ot = (PtrType*)obj.type;
	fint = nil;
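	// Validate the finalizer: it must be a func of exactly one argument
	// to which obj is assignable: the same pointer type, a pointer type
	// with the same element type where one side is unnamed, or an
	// interface that obj implements.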
	if(finalizer.type != nil) {
		if(finalizer.type->kind != KindFunc)
			goto badfunc;
		ft = (FuncType*)finalizer.type;
		if(ft->dotdotdot || ft->in.len != 1)
			goto badfunc;
		fint = *(Type**)ft->in.array;
		if(fint == obj.type) {
			// ok - same type
		} else if(fint->kind == KindPtr && (fint->x == nil || fint->x->name == nil || obj.type->x == nil || obj.type->x->name == nil) && ((PtrType*)fint)->elem == ((PtrType*)obj.type)->elem) {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
		} else if(fint->kind == KindInterface && ((InterfaceType*)fint)->mhdr.len == 0) {
			// ok - satisfies empty interface
		} else if(fint->kind == KindInterface && runtime·ifaceE2I2((InterfaceType*)fint, obj, &iface)) {
			// ok - satisfies non-empty interface
		} else
			goto badfunc;

		// compute size needed for return parameters
		for(i=0; i<ft->out.len; i++) {
			t = ((Type**)ft->out.array)[i];
			nret = ROUND(nret, t->align) + t->size;
		}
		nret = ROUND(nret, sizeof(void*));
	}

	if(!runtime·addfinalizer(obj.data, finalizer.data, nret, fint, ot)) {
		runtime·printf("runtime.SetFinalizer: finalizer already set\n");
		goto throw;
	}
	return;

badfunc:
	runtime·printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.type->string, *finalizer.type->string);
throw:
	runtime·throw("runtime.SetFinalizer");
}
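
// Illustrative use from Go code (not part of this file): assuming some
// heap-allocated *os.File f,
//
//	runtime.SetFinalizer(f, func(f *os.File) { f.Close() })
//
// registers a finalizer whose single argument is assignable from f's
// type, which is exactly what the checks above enforce.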