github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/runtime/mstats.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory statistics

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Statistics.
// If you edit this structure, also edit type MemStats below.
type mstats struct {
	// General statistics.
	alloc       uint64 // bytes allocated and not yet freed
	total_alloc uint64 // bytes allocated (even if freed)
	sys         uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	nlookup     uint64 // number of pointer lookups
	nmalloc     uint64 // number of mallocs
	nfree       uint64 // number of frees

	// Statistics about malloc heap.
	// Protected by mheap.lock.
	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
	heap_sys      uint64 // bytes obtained from system
	heap_idle     uint64 // bytes in idle spans
	heap_inuse    uint64 // bytes in non-idle spans
	heap_released uint64 // bytes released to the OS
	heap_objects  uint64 // total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	stacks_inuse uint64 // this number is included in heap_inuse above
	stacks_sys   uint64 // always 0 in mstats
	mspan_inuse  uint64 // mspan structures
	mspan_sys    uint64
	mcache_inuse uint64 // mcache structures
	mcache_sys   uint64
	buckhash_sys uint64 // profiling bucket hash table
	gc_sys       uint64
	other_sys    uint64

	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	next_gc         uint64 // next gc (in heap_live time)
	last_gc         uint64 // last gc (in absolute time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	gc_cpu_fraction float64 // fraction of CPU time used by GC
	enablegc        bool
	debuggc         bool

	// Statistics about allocation size classes.

	by_size [_NumSizeClasses]struct {
		size    uint32
		nmalloc uint64
		nfree   uint64
	}

	// Statistics below here are not exported to Go directly.

	tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to Go directly

	// heap_live is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heap_live <= heap_alloc, since heap_alloc
	// includes unmarked objects that have not yet been swept (and
	// hence goes up as we allocate and down as we sweep) while
	// heap_live excludes these objects (and hence only goes up
	// between GCs).
	//
	// This is updated atomically without locking. To reduce
	// contention, this is updated only when obtaining a span from
	// an mcentral and at this point it counts all of the
	// unallocated slots in that span (which will be allocated
	// before that mcache obtains another span from that
	// mcentral). Hence, it slightly overestimates the "true" live
	// heap size. It's better to overestimate than to
	// underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this
	// leads to a conservative GC rate rather than a GC rate that
	// is potentially too low.
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// gcController.revise().
	heap_live uint64

	// heap_scan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heap_live), but omitting
	// no-scan objects and no-scan tails of objects.
	//
	// Whenever this is updated, call gcController.revise().
	heap_scan uint64

	// heap_marked is the number of bytes marked by the previous
	// GC. After mark termination, heap_live == heap_marked, but
	// unlike heap_live, heap_marked does not change until the
	// next mark termination.
	heap_marked uint64

	// heap_reachable is an estimate of the reachable heap bytes
	// at the end of the previous GC.
	heap_reachable uint64
}
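// NOTE: for orientation, the heap_live update protocol described above
// looks roughly like this at a call site (a hedged sketch, not the
// verbatim runtime code; the real updates live in the mcentral/mcache
// span-transfer paths, and spanBytes here is a placeholder for the
// span's slot bytes):
//
//	atomic.Xadd64(&memstats.heap_live, int64(spanBytes))
//	if trace.enabled {
//		traceHeapAlloc()
//	}
//	if gcBlackenEnabled != 0 {
//		gcController.revise()
//	}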

var memstats mstats

// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// General statistics.
	Alloc      uint64 // bytes allocated and not yet freed
	TotalAlloc uint64 // bytes allocated (even if freed)
	Sys        uint64 // bytes obtained from system (sum of XxxSys below)
	Lookups    uint64 // number of pointer lookups
	Mallocs    uint64 // number of mallocs
	Frees      uint64 // number of frees

	// Main allocation heap statistics.
	HeapAlloc    uint64 // bytes allocated and not yet freed (same as Alloc above)
	HeapSys      uint64 // bytes obtained from system
	HeapIdle     uint64 // bytes in idle spans
	HeapInuse    uint64 // bytes in non-idle spans
	HeapReleased uint64 // bytes released to the OS
	HeapObjects  uint64 // total number of allocated objects

	// Low-level fixed-size structure allocator statistics.
	//	Inuse is bytes used now.
	//	Sys is bytes obtained from system.
	StackInuse  uint64 // bytes used by stack allocator
	StackSys    uint64
	MSpanInuse  uint64 // mspan structures
	MSpanSys    uint64
	MCacheInuse uint64 // mcache structures
	MCacheSys   uint64
	BuckHashSys uint64 // profiling bucket hash table
	GCSys       uint64 // GC metadata
	OtherSys    uint64 // other system allocations

	// Garbage collector statistics.
	NextGC        uint64 // next collection will happen when HeapAlloc ≥ this amount
	LastGC        uint64 // end time of last collection (nanoseconds since 1970)
	PauseTotalNs  uint64
	PauseNs       [256]uint64 // circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]
	PauseEnd      [256]uint64 // circular buffer of recent GC pause end times
	NumGC         uint32
	GCCPUFraction float64 // fraction of CPU time used by GC
	EnableGC      bool
	DebugGC       bool

	// Per-size allocation statistics.
	// 61 is NumSizeClasses in the C code.
	BySize [61]struct {
		Size    uint32
		Mallocs uint64
		Frees   uint64
	}
}
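// A consumer can walk the PauseNs ring from most recent to oldest
// roughly like this (a sketch; it assumes at least one GC has completed):
//
//	var m runtime.MemStats
//	runtime.ReadMemStats(&m)
//	n := m.NumGC
//	if n > 256 {
//		n = 256
//	}
//	for i := uint32(0); i < n; i++ {
//		pause := m.PauseNs[(m.NumGC+255-i)%256]
//		_ = pause // pauses delivered most recent first
//	}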

// Size of the trailing by_size array differs between Go and C,
// and all data after by_size is local to runtime, not exported.
// NumSizeClasses was changed, but we cannot change the Go struct
// because of backward compatibility.
// sizeof_C_MStats is what C thinks the size of the Go struct is.
var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])

func init() {
	var memStats MemStats
	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
		throw("MStats vs MemStatsType size mismatch")
	}
}
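// In other words, the init check above asserts that the exported prefix
// of mstats (everything through by_size, with by_size truncated to 61
// entries) has exactly the size of MemStats, so the flat memmove in
// readmemstats_m below copies field-for-field and never touches memory
// past the end of either struct. (This restates the check; it adds no
// new logic.)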

// ReadMemStats populates m with memory allocator statistics.
func ReadMemStats(m *MemStats) {
	stopTheWorld("read mem stats")

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld()
}
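// Typical client-side usage (a minimal sketch; the fmt import and format
// string are illustrative only, and since ReadMemStats stops the world,
// it is too expensive to sit on a hot path):
//
//	var m runtime.MemStats
//	runtime.ReadMemStats(&m)
//	fmt.Printf("alloc=%d sys=%d numgc=%d\n", m.Alloc, m.Sys, m.NumGC)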

func readmemstats_m(stats *MemStats) {
	updatememstats(nil)

	// The size of the trailing by_size array differs between Go and C:
	// NumSizeClasses was changed, but we cannot change the Go struct
	// because of backward compatibility.
	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)

	// Stack numbers are part of the heap numbers; separate those out
	// for user consumption.
	stats.StackSys += stats.StackInuse
	stats.HeapInuse -= stats.StackInuse
	stats.HeapSys -= stats.StackInuse
}
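// Worked example of the separation above (illustrative numbers only): if
// the internal stats report HeapSys = 66MB, HeapInuse = 10MB and
// StackInuse = 1MB (with StackSys = 0, as always in mstats), the user
// sees StackSys = 1MB, HeapInuse = 9MB and HeapSys = 65MB, so stack
// memory is no longer double-counted against the heap.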

//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}

func readGCStats_m(pauses *[]uint64) {
	p := *pauses
	// Calling code in runtime/debug should make the slice large enough.
	if cap(p) < len(memstats.pause_ns)+3 {
		throw("short slice passed to readGCStats")
	}

	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	lock(&mheap_.lock)

	n := memstats.numgc
	if n > uint32(len(memstats.pause_ns)) {
		n = uint32(len(memstats.pause_ns))
	}

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%len(pause_ns)], and successively earlier
	// pauses are at successively earlier indices. We deliver the
	// times most recent first (in p[0]).
	p = p[:cap(p)]
	for i := uint32(0); i < n; i++ {
		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
		p[i] = memstats.pause_ns[j]
		p[n+i] = memstats.pause_end[j]
	}

	p[n+n] = memstats.last_gc
	p[n+n+1] = uint64(memstats.numgc)
	p[n+n+2] = memstats.pause_total_ns
	unlock(&mheap_.lock)
	*pauses = p[:n+n+3]
}
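// The consumer in runtime/debug unpacks the returned slice roughly as
// follows (a sketch of the layout contract above, not the verbatim
// debug-package code):
//
//	n := (len(p) - 3) / 2
//	pauses := p[:n]     // pause durations, most recent first
//	ends := p[n : 2*n]  // matching pause end times
//	lastGC := p[2*n]    // end time of last GC (ns since 1970)
//	numGC := p[2*n+1]   // total number of GCs
//	totalNs := p[2*n+2] // sum of all pause durations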

//go:nowritebarrier
func updatememstats(stats *gcstats) {
	if stats != nil {
		*stats = gcstats{}
	}
	for mp := allm; mp != nil; mp = mp.alllink {
		if stats != nil {
			src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
			dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
			for i, v := range src {
				dst[i] += v
			}
			mp.gcstats = gcstats{}
		}
	}

	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys

	// Calculate memory allocator stats.
	// During program execution we only count the number of frees and the
	// amount of freed memory. The current number of live objects in the
	// heap and the amount of live heap memory are calculated by scanning
	// all spans. The total number of mallocs is calculated as the number
	// of frees plus the number of live objects. Similarly, the total
	// amount of allocated memory is calculated as the amount of freed
	// memory plus the amount of live heap memory.
	memstats.alloc = 0
	memstats.total_alloc = 0
	memstats.nmalloc = 0
	memstats.nfree = 0
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.by_size[i].nmalloc = 0
		memstats.by_size[i].nfree = 0
	}

	// Flush MCaches to MCentral.
	systemstack(flushallmcaches)

	// Aggregate local stats.
	cachestats()

	// Scan all spans and count the number of live objects.
	lock(&mheap_.lock)
	for i := uint32(0); i < mheap_.nspan; i++ {
		s := h_allspans[i]
		if s.state != mSpanInUse {
			continue
		}
		if s.sizeclass == 0 {
			memstats.nmalloc++
			memstats.alloc += uint64(s.elemsize)
		} else {
			memstats.nmalloc += uint64(s.allocCount)
			memstats.by_size[s.sizeclass].nmalloc += uint64(s.allocCount)
			memstats.alloc += uint64(s.allocCount) * uint64(s.elemsize)
		}
	}
	unlock(&mheap_.lock)

	// Aggregate by size class.
	smallfree := uint64(0)
	memstats.nfree = mheap_.nlargefree
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.nfree += mheap_.nsmallfree[i]
		memstats.by_size[i].nfree = mheap_.nsmallfree[i]
		memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
		smallfree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
	}
	memstats.nfree += memstats.tinyallocs
	memstats.nmalloc += memstats.nfree

	// Calculate derived stats.
	memstats.total_alloc = memstats.alloc + mheap_.largefree + smallfree
	memstats.heap_alloc = memstats.alloc
	memstats.heap_objects = memstats.nmalloc - memstats.nfree
}
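// In equation form, the reconstruction above computes (with "live"
// counted from the span scan and frees taken from the mheap counters):
//
//	nmalloc      = live objects + nfree
//	total_alloc  = alloc + largefree + smallfree
//	heap_objects = nmalloc - nfree
//
// which is why only frees need to be tracked during normal execution.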

//go:nowritebarrier
func cachestats() {
	for i := 0; ; i++ {
		p := allp[i]
		if p == nil {
			break
		}
		c := p.mcache
		if c == nil {
			continue
		}
		purgecachedstats(c)
	}
}

//go:nowritebarrier
func flushallmcaches() {
	for i := 0; ; i++ {
		p := allp[i]
		if p == nil {
			break
		}
		c := p.mcache
		if c == nil {
			continue
		}
		c.releaseAll()
		stackcache_clear(c)
	}
}

//go:nosplit
func purgecachedstats(c *mcache) {
	// Protected by either heap or GC lock.
	h := &mheap_
	memstats.heap_scan += uint64(c.local_scan)
	c.local_scan = 0
	memstats.tinyallocs += uint64(c.local_tinyallocs)
	c.local_tinyallocs = 0
	memstats.nlookup += uint64(c.local_nlookup)
	c.local_nlookup = 0
	h.largefree += uint64(c.local_largefree)
	c.local_largefree = 0
	h.nlargefree += uint64(c.local_nlargefree)
	c.local_nlargefree = 0
	for i := 0; i < len(c.local_nsmallfree); i++ {
		h.nsmallfree[i] += uint64(c.local_nsmallfree[i])
		c.local_nsmallfree[i] = 0
	}
}

// mSysStatInc atomically increases a given *system* memory stat. We are
// counting on this stat never overflowing a uintptr, so this function must
// only be used for system memory stats.
//
// The current implementation for little-endian architectures is based on
// xadduintptr(), which is less than ideal: xadd64() should really be used.
// Using xadduintptr() is a stop-gap solution until arm supports an xadd64()
// that doesn't use locks. (Locks are a problem as they require a valid G,
// which restricts their usability.)
//
// A side effect of using xadduintptr() is that we need to check for
// overflow errors.
//go:nosplit
func mSysStatInc(sysStat *uint64, n uintptr) {
	if sys.BigEndian != 0 {
		atomic.Xadd64(sysStat, int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
		print("runtime: stat overflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}
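// Why val < n detects wraparound (illustrative 32-bit arithmetic): if the
// stat held 0xFFFFFFF0 and n is 0x20, the add wraps to val = 0x10, and
// 0x10 < 0x20 trips the check. Without wraparound, val = old+n >= n always
// holds, since old >= 0.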

// mSysStatDec atomically decreases a given *system* memory stat. The same
// comments as for mSysStatInc apply.
//go:nosplit
func mSysStatDec(sysStat *uint64, n uintptr) {
	if sys.BigEndian != 0 {
		atomic.Xadd64(sysStat, -int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
		print("runtime: stat underflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}
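// Why val+n < n detects underflow: after the subtraction, val = old-n
// modulo the word size, so val+n wraps back to old, and old < n is exactly
// the condition under which the stat would have gone negative.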