github.com/xushiwei/go@v0.0.0-20130601165731-2b9d83f45bc9/src/pkg/runtime/mheap.c

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"

static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);

static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime·SysAlloc(cap*sizeof(all[0]));
		if(all == nil)
			runtime·throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]));
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}

// Initialize the heap; fetch memory using alloc.
void
runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
{
	uint32 i;

	runtime·FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
	runtime·FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++)
		runtime·MSpanList_Init(&h->free[i]);
	runtime·MSpanList_Init(&h->large);
	for(i=0; i<nelem(h->central); i++)
		runtime·MCentral_Init(&h->central[i], i);
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime·lock(h);
	runtime·purgecachedstats(m->mcache);
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime·unlock(h);
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}

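// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original mheap.c): a minimal sketch, in
// standard C, of the page-map invariant stated in the header comment above --
// a free span is recorded only at its first and last page, while an in-use
// span is recorded at every page it covers, so an interior pointer can be
// mapped back to its span. All names here (ToySpan, ToyMap, toy_*) are
// hypothetical and exist only for this example.
// ---------------------------------------------------------------------------
#include <stddef.h>
#include <stdint.h>

enum { ToyNumPages = 64 };

typedef struct ToySpan ToySpan;
struct ToySpan {
	uintptr_t start;   // first page index covered by the span
	uintptr_t npages;  // number of pages covered
	int       inuse;   // 1 if allocated, 0 if on a free list
};

typedef struct {
	ToySpan *map[ToyNumPages];  // page index -> owning span
} ToyMap;

// Record a span in the page map, following the invariant above.
void
toy_record(ToyMap *m, ToySpan *s)
{
	uintptr_t i;

	if(s->inuse) {
		for(i = 0; i < s->npages; i++)
			m->map[s->start + i] = s;    // every page, so interior lookups work
	} else {
		m->map[s->start] = s;                // endpoints only, enough for coalescing
		m->map[s->start + s->npages - 1] = s;
	}
}

// Map an arbitrary page index back to its span; only trustworthy for in-use
// spans, mirroring the restriction documented on runtime·MHeap_LookupMaybe below.
ToySpan*
toy_lookup(ToyMap *m, uintptr_t page)
{
	ToySpan *s = m->map[page];
	if(s == NULL || !s->inuse || page < s->start || page - s->start >= s->npages)
		return NULL;
	return s;
}
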
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0) {
		// We have called runtime·SysUnused with these pages, and on
		// Unix systems it called madvise. At this point at least
		// some BSD-based kernels will return these pages either as
		// zeros or with the old data. For our caller, the first word
		// in the page indicates whether the span contains zeros or
		// not (this word was set when the span was freed by
		// MCentral_Free or runtime·MCentral_FreeSpan). If the first
		// page in the span is returned as zeros, and some subsequent
		// page is returned with the old data, then we will be
		// returning a span that is assumed to be all zeros, but the
		// actual data will not be all zeros. Avoid that problem by
		// explicitly marking the span as not being zeroed, just in
		// case. The beadbead constant we use here means nothing, it
		// is just a unique constant not seen elsewhere in the
		// runtime, as a clue in case it turns up unexpectedly in
		// memory or in a stack trace.
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
	}
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : runtime·class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->map[p+n] = s;
	return s;
}

// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->large, npage, nil);
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}

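// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original mheap.c): a hedged sketch of
// the trimming step in MHeap_AllocLocked above, where a span larger than the
// request is split and the tail is handed back to the heap. TrimSpan and
// toy_trim are hypothetical names; the real code also fixes up the page map
// endpoints and copies the "needs zeroing" mark, which this sketch omits.
// ---------------------------------------------------------------------------
#include <stdint.h>

typedef struct {
	uintptr_t start;   // first page index
	uintptr_t npages;  // pages covered by the span
} TrimSpan;

// Split *s so it covers exactly npage pages, writing the leftover tail into
// *tail; returns 1 if a tail was produced, 0 if the span already fits. The
// caller would put the tail back on a free list, as MHeap_FreeLocked does.
int
toy_trim(TrimSpan *s, uintptr_t npage, TrimSpan *tail)
{
	if(s->npages <= npage)
		return 0;
	tail->start = s->start + npage;     // tail begins where the trimmed span ends
	tail->npages = s->npages - npage;
	s->npages = npage;
	return 1;
}
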
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime·MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime·MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime·printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
			return false;
		}
	}
	mstats.heap_sys += ask;

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime·FixAlloc_Alloc(&h->spanalloc);
	mstats.mspan_inuse = h->spanalloc.inuse;
	mstats.mspan_sys = h->spanalloc.sys;
	runtime·MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	h->map[p] = s;
	h->map[p + s->npages - 1] = s;
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime·MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start;
	return h->map[p >> PageShift];
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime·MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	if(sizeof(void*) == 8)
		q -= (uintptr)h->arena_start >> PageShift;
	s = h->map[q];
	if(s == nil || p < s->start || p - s->start >= s->npages)
		return nil;
	if(s->state != MSpanInUse)
		return nil;
	return s;
}

// Free the span back into the heap.
void
runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime·lock(h);
	runtime·purgecachedstats(m->mcache);
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime·unlock(h);
}

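// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original mheap.c): a hedged sketch of
// the request rounding done by MHeap_Grow above. The page size and minimum
// chunk used here (4 kB pages, 1 MB chunk) are assumptions for the example;
// the real values come from malloc.h.
// ---------------------------------------------------------------------------
#include <stdint.h>

enum {
	ToyPageShift = 12,        // assumed 4 kB pages
	ToyAllocChunk = 1 << 20,  // assumed 1 MB minimum growth
};

// Round a request of npage pages up to a multiple of 16 pages, then to at
// least ToyAllocChunk bytes, mirroring the two steps in MHeap_Grow.
uintptr_t
toy_grow_ask(uintptr_t npage)
{
	uintptr_t ask;

	npage = (npage + 15) & ~(uintptr_t)15;  // multiple of 16 pages (64 kB here)
	ask = npage << ToyPageShift;
	if(ask < ToyAllocChunk)
		ask = ToyAllocChunk;                // e.g. npage=1 -> 16 pages -> 64 kB -> 1 MB ask
	return ask;
}
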
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	uintptr *sp, *tp;
	MSpan *t;
	PageID p;

	if(s->types.sysalloc)
		runtime·settype_sysfree(s);
	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0) {
		runtime·printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		runtime·throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime·MSpanList_Remove(s);
	sp = (uintptr*)(s->start<<PageShift);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime·nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*tp |= *sp;	// propagate "needs zeroing" mark
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		p -= t->npages;
		h->map[p] = s;
		runtime·MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime·FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}
	if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*sp |= *tp;	// propagate "needs zeroing" mark
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		h->map[p + s->npages - 1] = s;
		runtime·MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime·FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime·MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime·MSpanList_Insert(&h->large, s);
}

static void
forcegchelper(Note *note)
{
	runtime·gc(1);
	runtime·notewakeup(note);
}

static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased;
	MSpan *s;

	if(runtime·MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats.heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;
			runtime·SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
		}
	}
	return sumreleased;
}

static uintptr
scavenge(uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = runtime·mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->large, now, limit);
	return sumreleased;
}

static FuncVal forcegchelperv = {(void(*)(void))forcegchelper};

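// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original mheap.c): a hedged sketch of
// the per-span decision made by scavengelist above -- a span that has sat
// unused for longer than the limit has its not-yet-released pages handed
// back, and is then marked fully released so it is not counted twice. The
// toy_* names and the fixed 4 kB page size are assumptions for the example.
// ---------------------------------------------------------------------------
#include <stdint.h>

typedef struct {
	uintptr_t npages;       // pages in the span
	uintptr_t npreleased;   // pages already returned to the OS
	uint64_t  unusedsince;  // nanotime when the span became idle
} IdleSpan;

// Return how many bytes this span would release now, updating its bookkeeping
// the same way scavengelist does (the real code also calls runtime·SysUnused).
uintptr_t
toy_scavenge_span(IdleSpan *s, uint64_t now, uint64_t limit)
{
	uintptr_t released;

	if(now - s->unusedsince <= limit)
		return 0;                                  // not idle long enough
	released = (s->npages - s->npreleased) << 12;  // assumed 4 kB pages
	s->npreleased = s->npages;                     // everything is released now
	return released;
}
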
// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
void
runtime·MHeap_Scavenger(void)
{
	MHeap *h;
	uint64 tick, now, forcegc, limit;
	uint32 k;
	uintptr sumreleased;
	byte *env;
	bool trace;
	Note note, *notep;

	g->issystem = true;
	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;

	trace = false;
	env = runtime·getenv("GOGCTRACE");
	if(env != nil)
		trace = runtime·atoi(env) > 0;

	h = runtime·mheap;
	for(k=0;; k++) {
		runtime·noteclear(&note);
		runtime·entersyscallblock();
		runtime·notetsleep(&note, tick);
		runtime·exitsyscall();

		runtime·lock(h);
		now = runtime·nanotime();
		if(now - mstats.last_gc > forcegc) {
			runtime·unlock(h);
			// The scavenger can not block other goroutines,
			// otherwise deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime·worldsema.
			runtime·noteclear(&note);
			notep = &note;
			runtime·newproc1(&forcegchelperv, (byte*)&notep, sizeof(notep), 0, runtime·MHeap_Scavenger);
			runtime·entersyscallblock();
			runtime·notesleep(&note);
			runtime·exitsyscall();
			if(trace)
				runtime·printf("scvg%d: GC forced\n", k);
			runtime·lock(h);
			now = runtime·nanotime();
		}
		sumreleased = scavenge(now, limit);
		runtime·unlock(h);

		if(trace) {
			if(sumreleased > 0)
				runtime·printf("scvg%d: %p MB released\n", k, sumreleased>>20);
			runtime·printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
				k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
				mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
		}
	}
}

void
runtime∕debug·freeOSMemory(void)
{
	runtime·gc(1);
	runtime·lock(runtime·mheap);
	scavenge(~(uintptr)0, 0);
	runtime·unlock(runtime·mheap);
}

// Initialize a new span with the given start and npages.
void
runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->elemsize = 0;
	span->state = 0;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
}

// Initialize an empty doubly-linked list.
void
runtime·MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}

void
runtime·MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}

bool
runtime·MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}

void
runtime·MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime·printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime·throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}

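// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original mheap.c): a hedged, standalone
// sketch of the circular doubly-linked list discipline that the MSpanList_*
// helpers above implement -- the list header points at itself when empty, and
// a detached node has both links nil so a repeated remove is harmless. ToyNode
// and the toy_* helpers are hypothetical names for this example only.
// ---------------------------------------------------------------------------
#include <stddef.h>

typedef struct ToyNode ToyNode;
struct ToyNode {
	ToyNode *next;
	ToyNode *prev;
};

void
toy_list_init(ToyNode *list)
{
	list->next = list;   // empty list: header linked to itself
	list->prev = list;
}

void
toy_list_insert(ToyNode *list, ToyNode *node)
{
	node->next = list->next;   // splice in right after the header
	node->prev = list;
	node->next->prev = node;
	node->prev->next = node;
}

void
toy_list_remove(ToyNode *node)
{
	if(node->prev == NULL && node->next == NULL)
		return;                // already detached, mirroring MSpanList_Remove
	node->prev->next = node->next;
	node->next->prev = node->prev;
	node->prev = NULL;
	node->next = NULL;
}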