github.com/rohankumardubey/syslog-redirector-golang@v0.0.0-20140320174030-4859f03d829a/src/pkg/runtime/mheap.c

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"

static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);

static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime·SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
		if(all == nil)
			runtime·throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}

// Initialize the heap; fetch memory using alloc.
void
runtime·MHeap_Init(MHeap *h)
{
	uint32 i;

	runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
	runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++)
		runtime·MSpanList_Init(&h->free[i]);
	runtime·MSpanList_Init(&h->large);
	for(i=0; i<nelem(h->central); i++)
		runtime·MCentral_Init(&h->central[i], i);
}

void
runtime·MHeap_MapSpans(MHeap *h)
{
	uintptr n;

	// Map spans array, PageSize at a time.
	n = (uintptr)h->arena_used;
	if(sizeof(void*) == 8)
		n -= (uintptr)h->arena_start;
	n = n / PageSize * sizeof(h->spans[0]);
	n = ROUND(n, PageSize);
	if(h->spans_mapped >= n)
		return;
	runtime·SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, &mstats.other_sys);
	h->spans_mapped = n;
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime·lock(h);
	mstats.heap_alloc += m->mcache->local_cachealloc;
	m->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime·unlock(h);
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}

static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
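	// h->free[n] holds free spans of exactly n pages, so scanning upward
	// from npage finds the smallest span that is large enough; any excess
	// pages are trimmed and returned to the heap further below.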
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0) {
		// We have called runtime·SysUnused with these pages, and on
		// Unix systems it called madvise. At this point at least
		// some BSD-based kernels will return these pages either as
		// zeros or with the old data. For our caller, the first word
		// in the page indicates whether the span contains zeros or
		// not (this word was set when the span was freed by
		// MCentral_Free or runtime·MCentral_FreeSpan). If the first
		// page in the span is returned as zeros, and some subsequent
		// page is returned with the old data, then we will be
		// returning a span that is assumed to be all zeros, but the
		// actual data will not be all zeros. Avoid that problem by
		// explicitly marking the span as not being zeroed, just in
		// case. The beadbead constant we use here means nothing, it
		// is just a unique constant not seen elsewhere in the
		// runtime, as a clue in case it turns up unexpectedly in
		// memory or in a stack trace.
		runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
	}
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : runtime·class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->spans[p+n] = s;
	return s;
}

// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->large, npage, nil);
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
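// For example, given free spans of 8, 5, and 5 pages on the list and a
// request for 4 pages, the scan below returns the lower-addressed 5-page span.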
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime·MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime·MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime·printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
			return false;
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime·FixAlloc_Alloc(&h->spanalloc);
	runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	h->spans[p] = s;
	h->spans[p + s->npages - 1] = s;
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime·MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start;
	return h->spans[p >> PageShift];
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime·MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	if(sizeof(void*) == 8)
		q -= (uintptr)h->arena_start >> PageShift;
	s = h->spans[q];
	if(s == nil || p < s->start || v >= s->limit || s->state != MSpanInUse)
		return nil;
	return s;
}

// Free the span back into the heap.
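// Freeing may coalesce s with adjacent free spans: MHeap_FreeLocked below
// absorbs their pages into s and returns the neighbors' MSpan structures to
// the span allocator before putting s on a free list.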
void
runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime·lock(h);
	mstats.heap_alloc += m->mcache->local_cachealloc;
	m->mcache->local_cachealloc = 0;
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime·unlock(h);
}

static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	uintptr *sp, *tp;
	MSpan *t;
	PageID p;

	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0) {
		runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		runtime·throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime·MSpanList_Remove(s);
	sp = (uintptr*)(s->start<<PageShift);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime·nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
		if(t->npreleased == 0) { // can't touch this otherwise
			tp = (uintptr*)(t->start<<PageShift);
			*tp |= *sp;	// propagate "needs zeroing" mark
		}
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		p -= t->npages;
		h->spans[p] = s;
		runtime·MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime·FixAlloc_Free(&h->spanalloc, t);
	}
	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
		if(t->npreleased == 0) { // can't touch this otherwise
			tp = (uintptr*)(t->start<<PageShift);
			*sp |= *tp;	// propagate "needs zeroing" mark
		}
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		h->spans[p + s->npages - 1] = s;
		runtime·MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime·FixAlloc_Free(&h->spanalloc, t);
	}

	// Insert s into appropriate list.
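	// Spans small enough for an exact-size list go on h->free[s->npages];
	// anything of nelem(h->free) pages or more goes on h->large, which
	// MHeap_AllocLarge later searches by best fit.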
	if(s->npages < nelem(h->free))
		runtime·MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime·MSpanList_Insert(&h->large, s);
}

static void
forcegchelper(Note *note)
{
	runtime·gc(1);
	runtime·notewakeup(note);
}

static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased;
	MSpan *s;

	if(runtime·MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats.heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;
			runtime·SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
		}
	}
	return sumreleased;
}

static void
scavenge(int32 k, uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = &runtime·mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->large, now, limit);

	if(runtime·debug.gctrace > 0) {
		if(sumreleased > 0)
			runtime·printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
		runtime·printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
			k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
			mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
	}
}

static FuncVal forcegchelperv = {(void(*)(void))forcegchelper};

// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
void
runtime·MHeap_Scavenger(void)
{
	MHeap *h;
	uint64 tick, now, forcegc, limit;
	int32 k;
	Note note, *notep;

	g->issystem = true;
	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;

	h = &runtime·mheap;
	for(k=0;; k++) {
		runtime·noteclear(&note);
		runtime·notetsleepg(&note, tick);

		runtime·lock(h);
		now = runtime·nanotime();
		if(now - mstats.last_gc > forcegc) {
			runtime·unlock(h);
			// The scavenger can not block other goroutines,
			// otherwise the deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime·worldsema.
			runtime·noteclear(&note);
			notep = &note;
			runtime·newproc1(&forcegchelperv, (byte*)&notep, sizeof(notep), 0, runtime·MHeap_Scavenger);
			runtime·notetsleepg(&note, -1);
			if(runtime·debug.gctrace > 0)
				runtime·printf("scvg%d: GC forced\n", k);
			runtime·lock(h);
			now = runtime·nanotime();
		}
		scavenge(k, now, limit);
		runtime·unlock(h);
	}
}

void
runtime∕debug·freeOSMemory(void)
{
	runtime·gc(1);
	runtime·lock(&runtime·mheap);
	scavenge(-1, ~(uintptr)0, 0);
	runtime·unlock(&runtime·mheap);
}

// Initialize a new span with the given start and npages.
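// Only start and npages are taken from the arguments; every other field is
// cleared. Callers fill in fields such as state, sizeclass, and elemsize
// afterwards (see MHeap_AllocLocked and MHeap_Grow).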
void
runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->elemsize = 0;
	span->state = 0;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
}

// Initialize an empty doubly-linked list.
void
runtime·MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}

void
runtime·MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}

bool
runtime·MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}

void
runtime·MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime·printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime·throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}
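// The MSpanList routines above implement a circular doubly-linked list in
// which the list header acts as a sentinel: an empty list points at itself,
// so insertion and removal never need nil checks on interior links. The
// following is a minimal, self-contained sketch of that discipline with
// hypothetical names (DemoNode, demo_*); it is not part of the runtime and
// is kept disabled.
#if 0
#include <assert.h>
#include <stddef.h>

typedef struct DemoNode DemoNode;
struct DemoNode {
	DemoNode *next;
	DemoNode *prev;
};

static void
demo_init(DemoNode *list)
{
	// An empty list is a sentinel linked to itself.
	list->next = list;
	list->prev = list;
}

static int
demo_isempty(DemoNode *list)
{
	return list->next == list;
}

static void
demo_insert(DemoNode *list, DemoNode *n)
{
	// Splice n in immediately after the sentinel, as MSpanList_Insert does.
	n->next = list->next;
	n->prev = list;
	n->next->prev = n;
	n->prev->next = n;
}

static void
demo_remove(DemoNode *n)
{
	// Unlink n and clear its pointers, as MSpanList_Remove does.
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = NULL;
	n->prev = NULL;
}

int
main(void)
{
	DemoNode list, a, b;

	demo_init(&list);
	assert(demo_isempty(&list));
	demo_insert(&list, &a);
	demo_insert(&list, &b);		// list -> b -> a -> list
	demo_remove(&a);
	assert(!demo_isempty(&list));
	demo_remove(&b);
	assert(demo_isempty(&list));
	return 0;
}
#endif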