github.com/rohankumardubey/syslog-redirector-golang@v0.0.0-20140320174030-4859f03d829a/src/pkg/runtime/malloc.goc

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "type.h"
#include "typekind.h"
#include "race.h"
#include "stack.h"
#include "../../cmd/ld/textflag.h"

// Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
#pragma dataflag NOPTR
MHeap runtime·mheap;

int32 runtime·checking;

extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go

extern volatile intgo runtime·MemProfileRate;

// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
// If the block will be freed with runtime·free(), typ must be 0.
void*
runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
{
	int32 sizeclass;
	intgo rate;
	MCache *c;
	MCacheList *l;
	uintptr npages;
	MSpan *s;
	MLink *v;

	if(size == 0) {
		// All 0-length allocations use this pointer.
		// The language does not require the allocations to
		// have distinct values.
		return &runtime·zerobase;
	}
	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	// Disable preemption during settype_flush.
	// We can not use m->mallocing for this, because settype_flush calls mallocgc.
	m->locks++;
	m->mallocing = 1;

	if(DebugTypeAtBlockEnd)
		size += sizeof(uintptr);

	c = m->mcache;
	if(size <= MaxSmallSize) {
		// Allocate from mcache free lists.
		// Inlined version of SizeToClass().
		if(size <= 1024-8)
			sizeclass = runtime·size_to_class8[(size+7)>>3];
		else
			sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
		size = runtime·class_to_size[sizeclass];
		l = &c->list[sizeclass];
		if(l->list == nil)
			runtime·MCache_Refill(c, sizeclass);
		v = l->list;
		l->list = v->next;
		l->nlist--;
		if(!(flag & FlagNoZero)) {
			v->next = nil;
			// block is zeroed iff second word is zero ...
			if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
				runtime·memclr((byte*)v, size);
		}
		c->local_cachealloc += size;
	} else {
		// TODO(rsc): Report tracebacks for very large allocations.

		// Allocate directly from heap.
		npages = size >> PageShift;
		if((size & PageMask) != 0)
			npages++;
		s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, !(flag & FlagNoZero));
		if(s == nil)
			runtime·throw("out of memory");
		s->limit = (byte*)(s->start<<PageShift) + size;
		size = npages<<PageShift;
		v = (void*)(s->start << PageShift);

		// setup for mark sweep
		runtime·markspan(v, 0, 0, true);
	}

	if(!(flag & FlagNoGC))
		runtime·markallocated(v, size, (flag&FlagNoScan) != 0);

	if(DebugTypeAtBlockEnd)
		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;

	// TODO: save type even if FlagNoScan? Potentially expensive but might help
	// heap profiling/tracing.
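	// When span type information is in use and the block is scannable, queue
	// the (pointer, type) pair in the per-M settype buffer; the buffer is
	// drained in batches by runtime·settype_flush once it fills up.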
	if(UseSpanType && !(flag & FlagNoScan) && typ != 0) {
		uintptr *buf, i;

		buf = m->settype_buf;
		i = m->settype_bufsize;
		buf[i++] = (uintptr)v;
		buf[i++] = typ;
		m->settype_bufsize = i;
	}

	m->mallocing = 0;
	if(UseSpanType && !(flag & FlagNoScan) && typ != 0 && m->settype_bufsize == nelem(m->settype_buf))
		runtime·settype_flush(m);
	m->locks--;
	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
		g->stackguard0 = StackPreempt;

	if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
		if(size >= rate)
			goto profile;
		if(m->mcache->next_sample > size)
			m->mcache->next_sample -= size;
		else {
			// pick next profile time
			// If you change this, also change allocmcache.
			if(rate > 0x3fffffff)	// make 2*rate not overflow
				rate = 0x3fffffff;
			m->mcache->next_sample = runtime·fastrand1() % (2*rate);
		profile:
			runtime·setblockspecial(v, true);
			runtime·MProf_Malloc(v, size);
		}
	}

	if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
		runtime·gc(0);

	if(raceenabled)
		runtime·racemalloc(v, size);
	return v;
}

void*
runtime·malloc(uintptr size)
{
	return runtime·mallocgc(size, 0, FlagNoInvokeGC);
}

// Free the object whose base pointer is v.
void
runtime·free(void *v)
{
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uint32 prof;
	uintptr size;

	if(v == nil)
		return;

	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;

	if(!runtime·mlookup(v, nil, nil, &s)) {
		runtime·printf("free %p: not an allocated block\n", v);
		runtime·throw("free runtime·mlookup");
	}
	prof = runtime·blockspecial(v);

	if(raceenabled)
		runtime·racefree(v);

	// Find size class for v.
	sizeclass = s->sizeclass;
	c = m->mcache;
	if(sizeclass == 0) {
		// Large object.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime·markfreed(v, size);
		runtime·unmarkspan(v, 1<<PageShift);
		runtime·MHeap_Free(&runtime·mheap, s, 1);
		c->local_nlargefree++;
		c->local_largefree += size;
	} else {
		// Small object.
		size = runtime·class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
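		// The free counts recorded below are per-MCache and are folded into
		// the global heap statistics by runtime·purgecachedstats.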
		runtime·markfreed(v, size);
		c->local_nsmallfree[sizeclass]++;
		runtime·MCache_Free(c, v, sizeclass, size);
	}
	if(prof)
		runtime·MProf_Free(v, size);
	m->mallocing = 0;
}

int32
runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	uintptr n, i;
	byte *p;
	MSpan *s;

	m->mcache->local_nlookup++;
	if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
		// purge cache stats to prevent overflow
		runtime·lock(&runtime·mheap);
		runtime·purgecachedstats(m->mcache);
		runtime·unlock(&runtime·mheap);
	}

	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
	if(sp)
		*sp = s;
	if(s == nil) {
		runtime·checkfreed(v, 1);
		if(base)
			*base = nil;
		if(size)
			*size = 0;
		return 0;
	}

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		// Large object.
		if(base)
			*base = p;
		if(size)
			*size = s->npages<<PageShift;
		return 1;
	}

	n = s->elemsize;
	if(base) {
		i = ((byte*)v - p)/n;
		*base = p + i*n;
	}
	if(size)
		*size = n;

	return 1;
}

MCache*
runtime·allocmcache(void)
{
	intgo rate;
	MCache *c;

	runtime·lock(&runtime·mheap);
	c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
	runtime·unlock(&runtime·mheap);
	runtime·memclr((byte*)c, sizeof(*c));

	// Set first allocation sample size.
	rate = runtime·MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime·fastrand1() % (2*rate);

	return c;
}

void
runtime·freemcache(MCache *c)
{
	runtime·MCache_ReleaseAll(c);
	runtime·lock(&runtime·mheap);
	runtime·purgecachedstats(c);
	runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
	runtime·unlock(&runtime·mheap);
}

void
runtime·purgecachedstats(MCache *c)
{
	MHeap *h;
	int32 i;

	// Protected by either heap or GC lock.
	h = &runtime·mheap;
	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	h->largefree += c->local_largefree;
	c->local_largefree = 0;
	h->nlargefree += c->local_nlargefree;
	c->local_nlargefree = 0;
	for(i=0; i<nelem(c->local_nsmallfree); i++) {
		h->nsmallfree[i] += c->local_nsmallfree[i];
		c->local_nsmallfree[i] = 0;
	}
}

uintptr runtime·sizeof_C_MStats = sizeof(MStats);

#define MaxArena32 (2U<<30)

void
runtime·mallocinit(void)
{
	byte *p;
	uintptr arena_size, bitmap_size, spans_size;
	extern byte end[];
	byte *want;
	uintptr limit;
	uint64 i;

	p = nil;
	arena_size = 0;
	bitmap_size = 0;
	spans_size = 0;

	// for 64-bit build
	USED(p);
	USED(arena_size);
	USED(bitmap_size);
	USED(spans_size);

	runtime·InitSizes();

	// limit = runtime·memlimit();
	// See https://code.google.com/p/go/issues/detail?id=5049
	// TODO(rsc): Fix after 1.1.
	limit = 0;

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 128 GB (MaxMem) should be big enough for now.
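		// (With 8-byte words and 4 bitmap bits per word, the bitmap below is
		// arena_size/16 = 8 GB, and the spans array adds another
		// arena_size/PageSize pointers, roughly 256 MB with 4 KB pages.)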
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 128 GB region takes away 37 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism
		arena_size = MaxMem;
		bitmap_size = arena_size / (sizeof(void*)*8/4);
		spans_size = arena_size / PageSize * sizeof(runtime·mheap.spans[0]);
		spans_size = ROUND(spans_size, PageSize);
		for(i = 0; i <= 0x7f; i++) {
			p = (void*)(i<<40 | 0x00c0ULL<<32);
			p = runtime·SysReserve(p, bitmap_size + spans_size + arena_size);
			if(p != nil)
				break;
		}
	}
	if (p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope it's in the 2GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve 512 MB bitmap, enough
		// for 4GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		spans_size = MaxArena32 / PageSize * sizeof(runtime·mheap.spans[0]);
		if(limit > 0 && arena_size+bitmap_size+spans_size > limit) {
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
			spans_size = arena_size / PageSize * sizeof(runtime·mheap.spans[0]);
		}
		spans_size = ROUND(spans_size, PageSize);

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement. If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer. Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
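		// (If the reservation cannot be satisfied at all, SysReserve returns
		// nil and mallocinit throws below; the 32-bit path has no further
		// fallback at init time.)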
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		want = (byte*)ROUND((uintptr)end + (1<<18), 1<<20);
		p = runtime·SysReserve(want, bitmap_size + spans_size + arena_size);
		if(p == nil)
			runtime·throw("runtime: cannot reserve arena virtual address space");
		if((uintptr)p & (((uintptr)1<<PageShift)-1))
			runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p,
				bitmap_size+spans_size+arena_size);
	}
	if((uintptr)p & (((uintptr)1<<PageShift)-1))
		runtime·throw("runtime: SysReserve returned unaligned address");

	runtime·mheap.spans = (MSpan**)p;
	runtime·mheap.bitmap = p + spans_size;
	runtime·mheap.arena_start = p + spans_size + bitmap_size;
	runtime·mheap.arena_used = runtime·mheap.arena_start;
	runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size;

	// Initialize the rest of the allocator.
	runtime·MHeap_Init(&runtime·mheap);
	m->mcache = runtime·allocmcache();

	// See if it works.
	runtime·free(runtime·malloc(1));
}

void*
runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
{
	byte *p;

	if(n > h->arena_end - h->arena_used) {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		byte *new_end;
		uintptr needed;

		needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
		needed = ROUND(needed, 256<<20);
		new_end = h->arena_end + needed;
		if(new_end <= h->arena_start + MaxArena32) {
			p = runtime·SysReserve(h->arena_end, new_end - h->arena_end);
			if(p == h->arena_end)
				h->arena_end = new_end;
		}
	}
	if(n <= h->arena_end - h->arena_used) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime·SysMap(p, n, &mstats.heap_sys);
		h->arena_used += n;
		runtime·MHeap_MapBits(h);
		runtime·MHeap_MapSpans(h);
		if(raceenabled)
			runtime·racemapshadow(p, n);
		return p;
	}

	// If using 64-bit, our reservation is all we have.
	if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated bitmap for.
	p = runtime·SysAlloc(n, &mstats.heap_sys);
	if(p == nil)
		return nil;

	if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
		runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime·SysFree(p, n, &mstats.heap_sys);
		return nil;
	}

	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(h->arena_used > h->arena_end)
			h->arena_end = h->arena_used;
		runtime·MHeap_MapBits(h);
		runtime·MHeap_MapSpans(h);
		if(raceenabled)
			runtime·racemapshadow(p, n);
	}

	return p;
}

static struct
{
	Lock;
	byte*	pos;
	byte*	end;
} persistent;

enum
{
	PersistentAllocChunk	= 256<<10,
	PersistentAllocMaxBlock	= 64<<10,	// VM reservation granularity is 64K on windows
};

// Wrapper around SysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
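// Requests of PersistentAllocMaxBlock (64 KB) or more bypass the chunk cache
// and go straight to SysAlloc; smaller requests are carved out of
// PersistentAllocChunk-sized (256 KB) chunks under the persistent lock.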
void*
runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat)
{
	byte *p;

	if(align != 0) {
		if(align&(align-1))
			runtime·throw("persistentalloc: align is not a power of 2");
		if(align > PageSize)
			runtime·throw("persistentalloc: align is too large");
	} else
		align = 8;
	if(size >= PersistentAllocMaxBlock)
		return runtime·SysAlloc(size, stat);
	runtime·lock(&persistent);
	persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
	if(persistent.pos + size > persistent.end) {
		persistent.pos = runtime·SysAlloc(PersistentAllocChunk, &mstats.other_sys);
		if(persistent.pos == nil) {
			runtime·unlock(&persistent);
			runtime·throw("runtime: cannot allocate memory");
		}
		persistent.end = persistent.pos + PersistentAllocChunk;
	}
	p = persistent.pos;
	persistent.pos += size;
	runtime·unlock(&persistent);
	if(stat != &mstats.other_sys) {
		// reaccount the allocation against provided stat
		runtime·xadd64(stat, size);
		runtime·xadd64(&mstats.other_sys, -(uint64)size);
	}
	return p;
}

static Lock settype_lock;

void
runtime·settype_flush(M *mp)
{
	uintptr *buf, *endbuf;
	uintptr size, ofs, j, t;
	uintptr ntypes, nbytes2, nbytes3;
	uintptr *data2;
	byte *data3;
	void *v;
	uintptr typ, p;
	MSpan *s;

	buf = mp->settype_buf;
	endbuf = buf + mp->settype_bufsize;

	runtime·lock(&settype_lock);
	while(buf < endbuf) {
		v = (void*)*buf;
		*buf = 0;
		buf++;
		typ = *buf;
		buf++;

		// (Manually inlined copy of runtime·MHeap_Lookup)
		p = (uintptr)v>>PageShift;
		if(sizeof(void*) == 8)
			p -= (uintptr)runtime·mheap.arena_start >> PageShift;
		s = runtime·mheap.spans[p];

		if(s->sizeclass == 0) {
			s->types.compression = MTypes_Single;
			s->types.data = typ;
			continue;
		}

		size = s->elemsize;
		ofs = ((uintptr)v - (s->start<<PageShift)) / size;

		switch(s->types.compression) {
		case MTypes_Empty:
			ntypes = (s->npages << PageShift) / size;
			nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
			data3 = runtime·mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
			s->types.compression = MTypes_Bytes;
			s->types.data = (uintptr)data3;
			((uintptr*)data3)[1] = typ;
			data3[8*sizeof(uintptr) + ofs] = 1;
			break;

		case MTypes_Words:
			((uintptr*)s->types.data)[ofs] = typ;
			break;

		case MTypes_Bytes:
			data3 = (byte*)s->types.data;
			for(j=1; j<8; j++) {
				if(((uintptr*)data3)[j] == typ) {
					break;
				}
				if(((uintptr*)data3)[j] == 0) {
					((uintptr*)data3)[j] = typ;
					break;
				}
			}
			if(j < 8) {
				data3[8*sizeof(uintptr) + ofs] = j;
			} else {
				ntypes = (s->npages << PageShift) / size;
				nbytes2 = ntypes * sizeof(uintptr);
				data2 = runtime·mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
				s->types.compression = MTypes_Words;
				s->types.data = (uintptr)data2;

				// Move the contents of data3 to data2. Then deallocate data3.
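				// data3 stores one byte per object, an index into the
				// 8-entry type table at its front; data2 widens that to
				// one full type word per object.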
				for(j=0; j<ntypes; j++) {
					t = data3[8*sizeof(uintptr) + j];
					t = ((uintptr*)data3)[t];
					data2[j] = t;
				}
				data2[ofs] = typ;
			}
			break;
		}
	}
	runtime·unlock(&settype_lock);

	mp->settype_bufsize = 0;
}

uintptr
runtime·gettype(void *v)
{
	MSpan *s;
	uintptr t, ofs;
	byte *data;

	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
	if(s != nil) {
		t = 0;
		switch(s->types.compression) {
		case MTypes_Empty:
			break;
		case MTypes_Single:
			t = s->types.data;
			break;
		case MTypes_Words:
			ofs = (uintptr)v - (s->start<<PageShift);
			t = ((uintptr*)s->types.data)[ofs/s->elemsize];
			break;
		case MTypes_Bytes:
			ofs = (uintptr)v - (s->start<<PageShift);
			data = (byte*)s->types.data;
			t = data[8*sizeof(uintptr) + ofs/s->elemsize];
			t = ((uintptr*)data)[t];
			break;
		default:
			runtime·throw("runtime·gettype: invalid compression kind");
		}
		if(0) {
			runtime·lock(&settype_lock);
			runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
			runtime·unlock(&settype_lock);
		}
		return t;
	}
	return 0;
}

// Runtime stubs.

void*
runtime·mal(uintptr n)
{
	return runtime·mallocgc(n, 0, 0);
}

#pragma textflag NOSPLIT
void
runtime·new(Type *typ, uint8 *ret)
{
	ret = runtime·mallocgc(typ->size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
	FLUSH(&ret);
}

static void*
cnew(Type *typ, intgo n, int32 objtyp)
{
	if((objtyp&(PtrSize-1)) != objtyp)
		runtime·throw("runtime: invalid objtyp");
	if(n < 0 || (typ->size > 0 && n > MaxMem/typ->size))
		runtime·panicstring("runtime: allocation size out of range");
	return runtime·mallocgc(typ->size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
}

// same as runtime·new, but callable from C
void*
runtime·cnew(Type *typ)
{
	return cnew(typ, 1, TypeInfo_SingleObject);
}

void*
runtime·cnewarray(Type *typ, intgo n)
{
	return cnew(typ, n, TypeInfo_Array);
}

func GC() {
	runtime·gc(1);
}

func SetFinalizer(obj Eface, finalizer Eface) {
	byte *base;
	uintptr size;
	FuncType *ft;
	int32 i;
	uintptr nret;
	Type *t;
	Type *fint;
	PtrType *ot;
	Iface iface;

	if(obj.type == nil) {
		runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if(obj.type->kind != KindPtr) {
		runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
		goto throw;
	}
	if(!runtime·mlookup(obj.data, &base, &size, nil) || obj.data != base) {
		runtime·printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
		goto throw;
	}
	nret = 0;
	ot = (PtrType*)obj.type;
	fint = nil;
	if(finalizer.type != nil) {
		if(finalizer.type->kind != KindFunc)
			goto badfunc;
		ft = (FuncType*)finalizer.type;
		if(ft->dotdotdot || ft->in.len != 1)
			goto badfunc;
		fint = *(Type**)ft->in.array;
		if(fint == obj.type) {
			// ok - same type
		} else if(fint->kind == KindPtr && (fint->x == nil || fint->x->name == nil || obj.type->x == nil || obj.type->x->name == nil) && ((PtrType*)fint)->elem == ((PtrType*)obj.type)->elem) {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
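			// (This mirrors Go's assignability rule: pointer types with
			// identical element types are assignable when at least one of
			// them is unnamed.)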
		} else if(fint->kind == KindInterface && ((InterfaceType*)fint)->mhdr.len == 0) {
			// ok - satisfies empty interface
		} else if(fint->kind == KindInterface && runtime·ifaceE2I2((InterfaceType*)fint, obj, &iface)) {
			// ok - satisfies non-empty interface
		} else
			goto badfunc;

		// compute size needed for return parameters
		for(i=0; i<ft->out.len; i++) {
			t = ((Type**)ft->out.array)[i];
			nret = ROUND(nret, t->align) + t->size;
		}
		nret = ROUND(nret, sizeof(void*));
	}

	if(!runtime·addfinalizer(obj.data, finalizer.data, nret, fint, ot)) {
		runtime·printf("runtime.SetFinalizer: finalizer already set\n");
		goto throw;
	}
	return;

badfunc:
	runtime·printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.type->string, *finalizer.type->string);
throw:
	runtime·throw("runtime.SetFinalizer");
}
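// Illustrative use of SetFinalizer from Go code (the file type and open helper
// below are hypothetical, not part of this package): the finalizer's single
// parameter must be assignable from the object's pointer type, per the checks
// above, and its results only influence the nret size computed there.
//
//	package main
//
//	import (
//		"runtime"
//		"syscall"
//	)
//
//	type file struct{ fd int }
//
//	func open(name string) (*file, error) {
//		fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
//		if err != nil {
//			return nil, err
//		}
//		f := &file{fd: fd}
//		// Close the descriptor if the *file becomes unreachable unclosed.
//		runtime.SetFinalizer(f, func(f *file) { syscall.Close(f.fd) })
//		return f, nil
//	}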