github.com/yanyiwu/go@v0.0.0-20150106053140-03d6637dbb7f/src/runtime/heapdump.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Implementation of runtime/debug.WriteHeapDump.  Writes all
     6  // objects in the heap plus additional info (roots, threads,
     7  // finalizers, etc.) to a file.
     8  
     9  // The format of the dumped file is described at
    10  // http://golang.org/s/go14heapdump.
    11  
    12  package runtime
    13  
    14  import "unsafe"
    15  
// Tags and field kinds used in the dump format; these values are part of
// the on-disk format described at http://golang.org/s/go14heapdump and
// must not be renumbered.
const (
	// Kinds of field records emitted after an object/frame's contents.
	fieldKindEol   = 0 // terminates a field list (see dumpfields)
	fieldKindPtr   = 1 // a pointer slot at the following offset
	fieldKindIface = 2
	fieldKindEface = 3

	// Record tags identifying each kind of record in the dump.
	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)
    40  
var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte  // scratch buffer for makeheapobjbv; grown lazily, freed in writeheapdump_m

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte // bytes accepted by dwrite but not yet written to dumpfd
var nbuf uintptr      // number of valid bytes in buf
    51  
    52  func dwrite(data unsafe.Pointer, len uintptr) {
    53  	if len == 0 {
    54  		return
    55  	}
    56  	if nbuf+len <= bufSize {
    57  		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
    58  		nbuf += len
    59  		return
    60  	}
    61  
    62  	write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
    63  	if len >= bufSize {
    64  		write(dumpfd, data, int32(len))
    65  		nbuf = 0
    66  	} else {
    67  		copy(buf[:], (*[bufSize]byte)(data)[:len])
    68  		nbuf = len
    69  	}
    70  }
    71  
// dwritebyte writes a single byte to the dump.
func dwritebyte(b byte) {
	dwrite(unsafe.Pointer(&b), 1)
}
    75  
// flush writes any buffered dump bytes to dumpfd and empties the buffer.
func flush() {
	write(dumpfd, (unsafe.Pointer)(&buf), int32(nbuf))
	nbuf = 0
}
    80  
// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once.  That's ok.
const (
	typeCacheBuckets = 256 // must be a power of two: dumptype masks with typeCacheBuckets-1
	typeCacheAssoc   = 4   // types remembered per bucket
)

// typeCacheBucket holds the most recently dumped types hashing to one
// bucket, most recently used first.
type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket // cleared at the start of each dump (mdump)
    97  
    98  // dump a uint64 in a varint format parseable by encoding/binary
    99  func dumpint(v uint64) {
   100  	var buf [10]byte
   101  	var n int
   102  	for v >= 0x80 {
   103  		buf[n] = byte(v | 0x80)
   104  		n++
   105  		v >>= 7
   106  	}
   107  	buf[n] = byte(v)
   108  	n++
   109  	dwrite(unsafe.Pointer(&buf), uintptr(n))
   110  }
   111  
   112  func dumpbool(b bool) {
   113  	if b {
   114  		dumpint(1)
   115  	} else {
   116  		dumpint(0)
   117  	}
   118  }
   119  
// dumpmemrange dumps a varint uint64 length followed by len bytes of
// memory starting at data.
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}
   125  
   126  func dumpslice(b []byte) {
   127  	dumpint(uint64(len(b)))
   128  	if len(b) > 0 {
   129  		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
   130  	}
   131  }
   132  
// dumpstr dumps a string as a varint length followed by its bytes.
func dumpstr(s string) {
	// View the string header directly to avoid a []byte conversion.
	sp := (*stringStruct)(unsafe.Pointer(&s))
	dumpmemrange(sp.str, uintptr(sp.len))
}
   137  
// dumptype dumps a tagType record for t (address, size, name, and
// whether interface data slots for t are indirect), unless t was
// recently dumped already per the typecache.
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet.  Dump it and
	// remember we did so.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.size))
	if t.x == nil || t.x.pkgpath == nil || t.x.name == nil {
		dumpstr(*t._string)
	} else {
		// Emit "pkgpath.name" piecewise to avoid allocating the
		// concatenated string.
		pkgpath := (*stringStruct)(unsafe.Pointer(&t.x.pkgpath))
		name := (*stringStruct)(unsafe.Pointer(&t.x.name))
		dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
		dwrite(pkgpath.str, uintptr(pkgpath.len))
		dwritebyte('.')
		dwrite(name.str, uintptr(name.len))
	}
	// Whether an interface data slot holding this type is indirect
	// (compare the branch logic in itab_callback).
	dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
}
   184  
// dumpobj dumps one heap object: its address, raw contents, and a field
// record for each pointer slot described by the bitmap bv.
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpbvtypes(&bv, obj)
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}
   193  
// dumpotherroot records a miscellaneous GC root: a human-readable
// description and the pointer it holds.
func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}
   199  
// dumpfinalizer records a registered finalizer: the object it is attached
// to, the finalizer closure and its code pointer, the finalizer's argument
// type fint, and the pointer type ot of the object.
func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
   208  
// childInfo carries information from a callee stack frame up to its caller
// while dumpframe walks a goroutine's stack (see dumpgoroutine).
type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}
   218  
// dumpbv dumps a fieldKindPtr record for every pointer slot in bv,
// reporting each slot's byte offset relative to the given base offset.
// Two bits in bv describe each pointer-sized word.
// NOTE(review): the loop steps by bitsPerPointer while the offset math
// uses _BitsPerPointer — presumably these constants are equal; confirm.
func dumpbv(cbv *bitvector, offset uintptr) {
	bv := gobv(*cbv)
	for i := uintptr(0); i < uintptr(bv.n); i += bitsPerPointer {
		switch bv.bytedata[i/8] >> (i % 8) & 3 {
		default:
			throw("unexpected pointer bits")
		case _BitsDead:
			// BitsDead has already been processed in makeheapobjbv.
			// We should only see it in stack maps, in which case we should continue processing.
		case _BitsScalar:
			// ok
		case _BitsPointer:
			// Report the word's byte offset within the object/frame.
			dumpint(fieldKindPtr)
			dumpint(uint64(offset + i/_BitsPerPointer*ptrSize))
		}
	}
}
   237  
// dumpframe dumps one stack frame; it is the callback passed to
// gentraceback by dumpgoroutine.  arg is the *childInfo describing the
// callee frame; before returning, it is updated to describe this frame
// for the caller's benefit.  Always returns true to continue the walk.
func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
	child := (*childInfo)(arg)
	f := s.fn

	// Figure out what we can about our stack map
	pc := s.pc
	if pc != f.entry {
		// pc is a return address; back up so the pcdata lookup reflects
		// the state at the call instruction.
		pc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, pc)
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function.  It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))

	// Dump any types we will need to resolve Efaces.
	if child.args.n >= 0 {
		dumpbvtypes(&child.args, unsafe.Pointer(s.sp+child.argoff))
	}
	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
		dumpbvtypes(&bv, unsafe.Pointer(s.varp-uintptr(bv.n/_BitsPerPointer*ptrSize)))
	} else {
		// n < 0 marks "no locals bitmap available" for the code below.
		bv.n = -1
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += ptrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += ptrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += ptrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in
		// locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)/_BitsPerPointer*ptrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.arglen
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return true
}
   328  
// dumpgoroutine dumps one goroutine: its descriptor record, every stack
// frame (via gentraceback/dumpframe), and its defer and panic records.
func dumpgoroutine(gp *g) {
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		// Goroutine is in a syscall; use the saved syscall registers.
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(uint64(gp.goid))
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(gp.issystem)
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason)
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1 // no args pointer map yet for the innermost frame
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
		dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		// The panic argument is an interface value; split it into its
		// type and data words.
		eface := (*eface)(unsafe.Pointer(&p.arg))
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}
   386  
// dumpgs dumps every goroutine that is not dead.  The world must be
// stopped so goroutine statuses cannot change during the walk.
func dumpgs() {
	// goroutines & stacks
	for i := 0; uintptr(i) < allglen; i++ {
		gp := allgs[i]
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	}
}
   405  
// finq_callback dumps one entry of the queued-finalizer list; it is the
// callback passed to iterate_finq by dumproots.  The fields mirror
// dumpfinalizer, under a tagQueuedFinalizer record.
func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
   414  
// dumproots dumps the GC roots: the data and bss segments, the
// finalizer specials attached to in-use spans, and the queued-finalizer
// list.
func dumproots() {
	// data segment
	dumpbvtypes(&gcdatamask, unsafe.Pointer(&data))
	dumpint(tagData)
	dumpint(uint64(uintptr(unsafe.Pointer(&data))))
	dumpmemrange(unsafe.Pointer(&data), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)))
	dumpfields(gcdatamask)

	// bss segment
	dumpbvtypes(&gcbssmask, unsafe.Pointer(&bss))
	dumpint(tagBSS)
	dumpint(uint64(uintptr(unsafe.Pointer(&bss))))
	dumpmemrange(unsafe.Pointer(&bss), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)))
	dumpfields(gcbssmask)

	// MSpan.types
	allspans := h_allspans
	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
		s := allspans[spanidx]
		if s.state == _MSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				// Object address = span base + the special's offset.
				p := unsafe.Pointer((uintptr(s.start) << _PageShift) + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}
   450  
// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
// (_PageSize/8 entries — presumably the minimum object size is 8 bytes;
// dumpobjs throws if a span ever holds more objects than this.)
var freemark [_PageSize / 8]bool
   454  
// dumpobjs dumps every allocated (non-free) object in every in-use span,
// using the span freelists to identify free slots.
func dumpobjs() {
	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
		s := h_allspans[i]
		if s.state != _MSpanInUse {
			continue
		}
		p := uintptr(s.start << _PageShift) // address of first object slot
		size := s.elemsize
		n := (s.npages << _PageShift) / size // number of slots in the span
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}
		// Mark the free slots so the loop below can skip them.
		for l := s.freelist; l.ptr() != nil; l = l.ptr().next {
			freemark[(uintptr(l)-p)/size] = true
		}
		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				// Free slot: clear its mark (so freemark is all-false
				// again for the next span) and skip it.
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}
   479  
   480  func dumpparams() {
   481  	dumpint(tagParams)
   482  	x := uintptr(1)
   483  	if *(*byte)(unsafe.Pointer(&x)) == 1 {
   484  		dumpbool(false) // little-endian ptrs
   485  	} else {
   486  		dumpbool(true) // big-endian ptrs
   487  	}
   488  	dumpint(ptrSize)
   489  	dumpint(uint64(mheap_.arena_start))
   490  	dumpint(uint64(mheap_.arena_used))
   491  	dumpint(thechar)
   492  	dumpstr(goexperiment)
   493  	dumpint(uint64(ncpu))
   494  }
   495  
   496  func itab_callback(tab *itab) {
   497  	t := tab._type
   498  	// Dump a map from itab* to the type of its data field.
   499  	// We want this map so we can deduce types of interface referents.
   500  	if t.kind&kindDirectIface == 0 {
   501  		// indirect - data slot is a pointer to t.
   502  		dumptype(t.ptrto)
   503  		dumpint(tagItab)
   504  		dumpint(uint64(uintptr(unsafe.Pointer(tab))))
   505  		dumpint(uint64(uintptr(unsafe.Pointer(t.ptrto))))
   506  	} else if t.kind&kindNoPointers == 0 {
   507  		// t is pointer-like - data slot is a t.
   508  		dumptype(t)
   509  		dumpint(tagItab)
   510  		dumpint(uint64(uintptr(unsafe.Pointer(tab))))
   511  		dumpint(uint64(uintptr(unsafe.Pointer(t))))
   512  	} else {
   513  		// Data slot is a scalar.  Dump type just for fun.
   514  		// With pointer-only interfaces, this shouldn't happen.
   515  		dumptype(t)
   516  		dumpint(tagItab)
   517  		dumpint(uint64(uintptr(unsafe.Pointer(tab))))
   518  		dumpint(uint64(uintptr(unsafe.Pointer(t))))
   519  	}
   520  }
   521  
// dumpitabs dumps the itab-to-type map by walking all itabs with
// itab_callback.
func dumpitabs() {
	iterate_itabs(itab_callback)
}
   525  
// dumpms dumps a tagOSThread record for every M on the allm list.
func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}
   534  
// dumpmemstats dumps the tagMemStats record.  The fields are written in
// a fixed order that heap dump readers rely on; do not reorder them.
func dumpmemstats() {
	dumpint(tagMemStats)
	dumpint(memstats.alloc)
	dumpint(memstats.total_alloc)
	dumpint(memstats.sys)
	dumpint(memstats.nlookup)
	dumpint(memstats.nmalloc)
	dumpint(memstats.nfree)
	dumpint(memstats.heap_alloc)
	dumpint(memstats.heap_sys)
	dumpint(memstats.heap_idle)
	dumpint(memstats.heap_inuse)
	dumpint(memstats.heap_released)
	dumpint(memstats.heap_objects)
	dumpint(memstats.stacks_inuse)
	dumpint(memstats.stacks_sys)
	dumpint(memstats.mspan_inuse)
	dumpint(memstats.mspan_sys)
	dumpint(memstats.mcache_inuse)
	dumpint(memstats.mcache_sys)
	dumpint(memstats.buckhash_sys)
	dumpint(memstats.gc_sys)
	dumpint(memstats.other_sys)
	dumpint(memstats.next_gc)
	dumpint(memstats.last_gc)
	dumpint(memstats.pause_total_ns)
	// All 256 entries of the GC pause history.
	for i := 0; i < 256; i++ {
		dumpint(memstats.pause_ns[i])
	}
	dumpint(uint64(memstats.numgc))
}
   566  
// dumpmemprof_callback dumps one memory profile bucket: its address,
// allocation size, stack trace (one symbolized entry per frame), and
// alloc/free counts.  Invoked via iterate_memprof from dumpmemprof.
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	// View the raw stack array; only the first nstk entries are valid.
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if f == nil {
			// No symbol info: synthesize a "(0x<hex pc>)" frame name by
			// writing the characters backwards into buf.
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?") // unknown file
			dumpint(0)   // unknown line
		} else {
			dumpstr(funcname(f))
			if i > 0 && pc > f.entry {
				// Non-leaf frames hold return addresses; back up to the
				// call instruction for the line-number lookup.
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}
   613  
// dumpmemprof dumps the memory profile: every profile bucket (via
// dumpmemprof_callback), then a tagAllocSample record linking each
// profiled heap object to its bucket.
func dumpmemprof() {
	iterate_memprof(dumpmemprof_callback)
	allspans := h_allspans
	for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
		s := allspans[spanidx]
		if s.state != _MSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			// Object address = span base + the special's offset.
			p := uintptr(s.start<<_PageShift) + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}
   634  
// dumphdr is the magic header written at the start of every dump file.
var dumphdr = []byte("go1.4 heap dump\n")
   636  
// mdump writes the complete heap dump to dumpfd: header, params, itabs,
// objects, goroutines, threads, roots, memstats, and the memory profile,
// terminated by tagEOF.  Runs with the world stopped (see dumpgs).
func mdump() {
	// make sure we're done sweeping
	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
		s := h_allspans[i]
		if s.state == _MSpanInUse {
			mSpan_EnsureSwept(s)
		}
	}
	// Start with an empty type cache so every type gets dumped at least
	// once in this dump.
	memclr(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats()
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}
   658  
// writeheapdump_m implements runtime/debug.WriteHeapDump on the system
// stack: it dumps the heap to file descriptor fd and frees the temporary
// bitmap buffer afterwards.
func writeheapdump_m(fd uintptr) {
	_g_ := getg()
	// Park the user goroutine in a waiting state for the duration of the dump.
	casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
	_g_.waitreason = "dumping heap"

	// Update stats so we can dump them.
	// As a side effect, flushes all the MCaches so the MSpan.freelist
	// lists contain all the free objects.
	updatememstats(nil)

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump()

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		// Release the scratch buffer allocated by makeheapobjbv.
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
}
   684  
// dumpfields dumps the kind & offset of each pointer field described by
// bv (offsets relative to the start of the object), terminated by a
// fieldKindEol record.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}
   690  
// The heap dump reader needs to be able to disambiguate
// Eface entries.  So it needs to know every type that might
// appear in such an entry.  The following routine accomplishes that.
// TODO(rsc, khr): Delete - no longer possible.

// Dump all the types that appear in the type field of
// any Eface described by this bit vector.
// Intentionally a no-op (see the TODO above); callers still invoke it
// at every point where Eface types would need to be resolved.
func dumpbvtypes(bv *bitvector, base unsafe.Pointer) {
}
   700  
// makeheapobjbv builds a compacted pointer bitmap for the heap object at
// address p with the given size, copying the per-word type bits out of
// the heap bitmap into tmpbuf.  The returned bitvector aliases tmpbuf and
// is only valid until the next call.
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / ptrSize
	if uintptr(len(tmpbuf)) < nptr*_BitsPerPointer/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr*_BitsPerPointer/8 + 1
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Copy and compact the bitmap.
	var i uintptr
	for i = 0; i < nptr; i++ {
		// Locate word i's type bits in the heap bitmap, which grows
		// downward from arena_start.
		off := (p + i*ptrSize - mheap_.arena_start) / ptrSize
		bitp := (*uint8)(unsafe.Pointer(mheap_.arena_start - off/wordsPerBitmapByte - 1))
		shift := uint8((off % wordsPerBitmapByte) * gcBits)
		bits := (*bitp >> (shift + 2)) & _BitsMask
		if bits == _BitsDead {
			break // end of heap object
		}
		// Pack the two type bits for word i into tmpbuf.
		tmpbuf[i*_BitsPerPointer/8] &^= (_BitsMask << ((i * _BitsPerPointer) % 8))
		tmpbuf[i*_BitsPerPointer/8] |= bits << ((i * _BitsPerPointer) % 8)
	}
	return bitvector{int32(i * _BitsPerPointer), &tmpbuf[0]}
}