// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// https://golang.org/s/go14heapdump.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// runtime_debug_WriteHeapDump is the runtime-side implementation of
// runtime/debug.WriteHeapDump (wired up via go:linkname). It stops the
// world for the entire dump so the heap is consistent, and runs the dump
// itself on the system stack because the dump code walks goroutine stacks
// and must not grow/move its own.
//
//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stopTheWorld("write heap dump")

	systemstack(func() {
		writeheapdump_m(fd)
	})

	startTheWorld()
}

// Field kinds (how a slot inside an object/frame is interpreted by the
// dump reader) and record tags (the leading varint identifying each
// record in the dump stream). These values are part of the on-disk
// format described at https://golang.org/s/go14heapdump and must not
// be renumbered.
const (
	fieldKindEol   = 0
	fieldKindPtr   = 1
	fieldKindIface = 2
	fieldKindEface = 3
	tagEOF         = 0
	tagObject      = 1
	tagOtherRoot   = 2
	tagType        = 3
	tagGoroutine   = 4
	tagStackFrame  = 5
	tagParams      = 6
	tagFinalizer   = 7
	tagItab        = 8
	tagOSThread    = 9
	tagMemStats    = 10
	tagQueuedFinalizer = 11
	tagData        = 12
	tagBSS         = 13
	tagDefer       = 14
	tagPanic       = 15
	tagMemProf     = 16
	tagAllocSample = 17
)

var dumpfd uintptr // fd to write the dump to.
// tmpbuf is scratch storage for makeheapobjbv's pointer bitmaps; it is
// sysAlloc'd lazily and freed at the end of the dump by writeheapdump_m.
var tmpbuf []byte

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte
var nbuf uintptr // number of bytes currently buffered in buf

// dwrite appends len bytes at data to the dump, buffering small writes
// in buf and flushing to dumpfd when the buffer would overflow.
// Writes of bufSize or more bypass the buffer entirely.
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	// Fast path: the bytes fit in the remaining buffer space.
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	// Buffer is too full; flush what we have first.
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		// Large write: send it straight to the fd.
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		// Start the (now empty) buffer over with this data.
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}

// dwritebyte writes a single byte to the dump.
func dwritebyte(b byte) {
	dwrite(unsafe.Pointer(&b), 1)
}

// flush writes any buffered bytes to dumpfd and empties the buffer.
func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256
	typeCacheAssoc   = 4
)

// typeCacheBucket is one 4-way-associative bucket of the type cache,
// kept in most-recently-used order.
type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket

// dump a uint64 in a varint format parseable by encoding/binary
func dumpint(v uint64) {
	var buf [10]byte // 10 bytes is enough for 64 bits at 7 bits/byte
	var n int
	for v >= 0x80 {
		buf[n] = byte(v | 0x80) // high bit set: more bytes follow
		n++
		v >>= 7
	}
	buf[n] = byte(v)
	n++
	dwrite(unsafe.Pointer(&buf), uintptr(n))
}

// dumpbool writes a bool as a varint 0 or 1.
func dumpbool(b bool) {
	if b {
		dumpint(1)
	} else {
		dumpint(0)
	}
}

// dump varint uint64 length followed by memory contents
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}

// dumpslice writes a byte slice as a length-prefixed memory range.
func dumpslice(b []byte) {
	dumpint(uint64(len(b)))
	if len(b) > 0 {
		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
	}
}

// dumpstr writes a string as a length-prefixed memory range.
func dumpstr(s string) {
	sp := stringStructOf(&s)
	dumpmemrange(sp.str, uintptr(sp.len))
}

// dump information for a type
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	// (Shift the bucket down, evicting the least recently used entry.)
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.size))
	if t.x == nil || t.x.pkgpath == nil || t.x.name == nil {
		// No uncommon-type name info: fall back to the full type string.
		dumpstr(*t._string)
	} else {
		// Emit "pkgpath.name" without building an intermediate string.
		pkgpath := stringStructOf(t.x.pkgpath)
		name := stringStructOf(t.x.name)
		dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
		dwrite(pkgpath.str, uintptr(pkgpath.len))
		dwritebyte('.')
		dwrite(name.str, uintptr(name.len))
	}
	// Whether the interface data slot for this type holds a pointer
	// to the value rather than the value itself.
	dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
}

// dump an object
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpbvtypes(&bv, obj)
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}

// dumpotherroot records a miscellaneous root: a named pointer that
// keeps heap memory alive but isn't an object, stack, or data/bss slot.
func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}

// dumpfinalizer records a finalizer attached to obj: the closure fn,
// the type fint of fn's argument, and the pointer type ot of obj.
func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

// childInfo carries information from one dumped stack frame to the
// dump of its parent frame (the frames are visited callee-first by
// gentraceback).
type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}

// dump kinds & offsets of interesting fields in bv
func dumpbv(cbv *bitvector, offset uintptr) {
	bv := gobv(*cbv)
	for i := uintptr(0); i < uintptr(bv.n); i++ {
		if bv.bytedata[i/8]>>(i%8)&1 == 1 {
			// Bit i set: slot i of the region holds a pointer.
			dumpint(fieldKindPtr)
			dumpint(uint64(offset + i*sys.PtrSize))
		}
	}
}

// dumpframe emits one tagStackFrame record for the frame described by
// s. It is the callback passed to gentraceback; arg is the *childInfo
// carrying outargs layout up from the callee. Always returns true so
// the traceback continues to the next frame.
func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
	child := (*childInfo)(arg)
	f := s.fn

	// Figure out what we can about our stack map
	pc := s.pc
	if pc != f.entry {
		// Back up to the call instruction so the stack map matches
		// the state at the call site.
		pc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, pc, nil)
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))

	// Dump any types we will need to resolve Efaces.
	if child.args.n >= 0 {
		dumpbvtypes(&child.args, unsafe.Pointer(s.sp+child.argoff))
	}
	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
		dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n*sys.PtrSize)))
	} else {
		bv.n = -1 // no locals bitmap available
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.arglen
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return true
}

// dumpgoroutine emits a tagGoroutine record for gp, followed by its
// stack frames (via gentraceback/dumpframe) and its pending defer and
// panic records.
func dumpgoroutine(gp *g) {
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		// Goroutine is in a syscall; its Go registers were saved there.
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(uint64(gp.goid))
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason)
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
		dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}

// dumpgs dumps every goroutine that is not dead. The world is stopped,
// so no goroutine can be in a scan/running transition state.
func dumpgs() {
	// goroutines & stacks
	for i := 0; uintptr(i) < allglen; i++ {
		gp := allgs[i]
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	}
}

// finq_callback emits a tagQueuedFinalizer record; it is passed to
// iterate_finq to dump finalizers already queued for execution.
func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

// dumproots dumps the GC roots: the data and bss segments (with their
// pointer bitmaps), per-span finalizer specials, and the finalizer queue.
func dumproots() {
	// TODO(mwhudson): dump datamask etc from all objects
	// data segment
	dumpbvtypes(&firstmoduledata.gcdatamask, unsafe.Pointer(firstmoduledata.data))
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpbvtypes(&firstmoduledata.gcbssmask, unsafe.Pointer(firstmoduledata.bss))
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// MSpan.types
	allspans := h_allspans
	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
		s := allspans[spanidx]
		if s.state == _MSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				// Reconstruct the object address from the span start
				// page and the special's offset within the span.
				p := unsafe.Pointer((uintptr(s.start) << _PageShift) + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
var freemark [_PageSize / 8]bool

// dumpobjs walks every in-use span and dumps each allocated (non-free)
// object in it. Free slots are identified by walking the span's
// freelist into the freemark scratch array first.
func dumpobjs() {
	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
		s := h_allspans[i]
		if s.state != _MSpanInUse {
			continue
		}
		p := uintptr(s.start << _PageShift)
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}
		// Mark every slot on the span's freelist as free.
		for l := s.freelist; l.ptr() != nil; l = l.ptr().next {
			freemark[(uintptr(l)-p)/size] = true
		}
		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false // reset for the next span
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}

// dumpparams emits the tagParams record describing the machine:
// endianness, pointer size, heap bounds, architecture, experiments,
// and CPU count.
func dumpparams() {
	dumpint(tagParams)
	// Detect byte order by inspecting the first byte of a uintptr 1.
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(sys.PtrSize)
	dumpint(uint64(mheap_.arena_start))
	dumpint(uint64(mheap_.arena_used))
	dumpint(sys.TheChar)
	dumpstr(sys.Goexperiment)
	dumpint(uint64(ncpu))
}

// itab_callback dumps the mapping from an itab to the dynamic type of
// the data word of an interface using that itab; passed to iterate_itabs.
func itab_callback(tab *itab) {
	t := tab._type
	// Dump a map from itab* to the type of its data field.
	// We want this map so we can deduce types of interface referents.
	if t.kind&kindDirectIface == 0 {
		// indirect - data slot is a pointer to t.
		dumptype(t.ptrto)
		dumpint(tagItab)
		dumpint(uint64(uintptr(unsafe.Pointer(tab))))
		dumpint(uint64(uintptr(unsafe.Pointer(t.ptrto))))
	} else if t.kind&kindNoPointers == 0 {
		// t is pointer-like - data slot is a t.
		dumptype(t)
		dumpint(tagItab)
		dumpint(uint64(uintptr(unsafe.Pointer(tab))))
		dumpint(uint64(uintptr(unsafe.Pointer(t))))
	} else {
		// Data slot is a scalar. Dump type just for fun.
		// With pointer-only interfaces, this shouldn't happen.
		dumptype(t)
		dumpint(tagItab)
		dumpint(uint64(uintptr(unsafe.Pointer(tab))))
		dumpint(uint64(uintptr(unsafe.Pointer(t))))
	}
}

// dumpitabs dumps every itab in the runtime's itab table.
func dumpitabs() {
	iterate_itabs(itab_callback)
}

// dumpms emits a tagOSThread record for every M on the allm list.
func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}

// dumpmemstats emits the tagMemStats record. The field order here is
// part of the dump format and must match the reader.
func dumpmemstats() {
	dumpint(tagMemStats)
	dumpint(memstats.alloc)
	dumpint(memstats.total_alloc)
	dumpint(memstats.sys)
	dumpint(memstats.nlookup)
	dumpint(memstats.nmalloc)
	dumpint(memstats.nfree)
	dumpint(memstats.heap_alloc)
	dumpint(memstats.heap_sys)
	dumpint(memstats.heap_idle)
	dumpint(memstats.heap_inuse)
	dumpint(memstats.heap_released)
	dumpint(memstats.heap_objects)
	dumpint(memstats.stacks_inuse)
	dumpint(memstats.stacks_sys)
	dumpint(memstats.mspan_inuse)
	dumpint(memstats.mspan_sys)
	dumpint(memstats.mcache_inuse)
	dumpint(memstats.mcache_sys)
	dumpint(memstats.buckhash_sys)
	dumpint(memstats.gc_sys)
	dumpint(memstats.other_sys)
	dumpint(memstats.next_gc)
	dumpint(memstats.last_gc)
	dumpint(memstats.pause_total_ns)
	for i := 0; i < 256; i++ {
		dumpint(memstats.pause_ns[i])
	}
	dumpint(uint64(memstats.numgc))
}

// dumpmemprof_callback emits a tagMemProf record for one memory profile
// bucket; passed to iterate_memprof. For stack PCs with no symbol info
// it formats the raw PC as "(0x...)" by hand, since the dump runs in a
// context where fmt is unavailable.
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if f == nil {
			// No symbol info: render the PC as "(0x<hex>)",
			// building the string backwards from the end of buf.
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname(f))
			if i > 0 && pc > f.entry {
				// Not the innermost frame: pc is a return address,
				// so back up to the call instruction for line info.
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}

// dumpmemprof dumps the memory profile buckets and a tagAllocSample
// record for every profiled allocation still live in the heap.
func dumpmemprof() {
	iterate_memprof(dumpmemprof_callback)
	allspans := h_allspans
	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
		s := allspans[spanidx]
		if s.state != _MSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := uintptr(s.start<<_PageShift) + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}

// dumphdr is the magic header that starts every heap dump file.
var dumphdr = []byte("go1.5 heap dump\n")

// mdump writes the complete dump to dumpfd: header, params, itabs,
// objects, goroutines, threads, roots, memstats, memory profile, and
// a trailing EOF tag. Runs with the world stopped.
func mdump() {
	// make sure we're done sweeping
	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
		s := h_allspans[i]
		if s.state == _MSpanInUse {
			s.ensureSwept()
		}
	}
	// Clear the type cache so types from a previous dump are re-emitted.
	memclr(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats()
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}

// writeheapdump_m performs the dump on the system stack on behalf of
// runtime_debug_WriteHeapDump. It parks the calling user goroutine in
// _Gwaiting for the duration and releases tmpbuf afterwards.
func writeheapdump_m(fd uintptr) {
	_g_ := getg()
	casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
	_g_.waitreason = "dumping heap"

	// Update stats so we can dump them.
	// As a side effect, flushes all the MCaches so the MSpan.freelist
	// lists contain all the free objects.
	updatememstats(nil)

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump()

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
}

// dumpint() the kind & offset of each field in an object.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}

// The heap dump reader needs to be able to disambiguate
// Eface entries. So it needs to know every type that might
// appear in such an entry. The following routine accomplishes that.
// TODO(rsc, khr): Delete - no longer possible.

// Dump all the types that appear in the type field of
// any Eface described by this bit vector.
func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
}

// makeheapobjbv builds (in tmpbuf) a pointer bitmap for the size bytes
// of the heap object at p, by converting the GC heap bitmap into one
// bit per pointer-sized word.
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / sys.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		// Note: this p shadows the parameter p; it is the newly
		// allocated buffer, not the object address.
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	for i := uintptr(0); i < nptr/8+1; i++ {
		tmpbuf[i] = 0
	}
	i := uintptr(0)
	hbits := heapBitsForAddr(p)
	for ; i < nptr; i++ {
		if i >= 2 && !hbits.isMarked() {
			break // end of object
		}
		if hbits.isPointer() {
			tmpbuf[i/8] |= 1 << (i % 8)
		}
		hbits = hbits.next()
	}
	return bitvector{int32(i), &tmpbuf[0]}
}