github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/runtime/mstats.go

// Copyright 2009 The Go Authors.  All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory statistics

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Statistics.
// If you edit this structure, also edit type MemStats below.
type mstats struct {
	// General statistics.
	alloc       uint64 // bytes allocated and not yet freed
	total_alloc uint64 // bytes allocated (even if freed)
	sys         uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	nlookup     uint64 // number of pointer lookups
	nmalloc     uint64 // number of mallocs
	nfree       uint64 // number of frees

	// Statistics about malloc heap.
	// protected by mheap.lock
	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
	heap_sys      uint64 // bytes obtained from system
	heap_idle     uint64 // bytes in idle spans
	heap_inuse    uint64 // bytes in non-idle spans
	heap_released uint64 // bytes released to the os
	heap_objects  uint64 // total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	stacks_inuse uint64 // this number is included in heap_inuse above
	stacks_sys   uint64 // always 0 in mstats
	mspan_inuse  uint64 // mspan structures
	mspan_sys    uint64
	mcache_inuse uint64 // mcache structures
	mcache_sys   uint64
	buckhash_sys uint64 // profiling bucket hash table
	gc_sys       uint64
	other_sys    uint64

	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	next_gc         uint64 // next gc (in heap_alloc time)
	last_gc         uint64 // last gc (in absolute time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	gc_cpu_fraction float64 // fraction of CPU time used by GC
	enablegc        bool
	debuggc         bool

	// Statistics about allocation size classes.

	by_size [_NumSizeClasses]struct {
		size    uint32
		nmalloc uint64
		nfree   uint64
	}

	// Statistics below here are not exported to Go directly.

	tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to Go directly

	// heap_live is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heap_live <= heap_alloc, since heap_live
	// excludes unmarked objects that have not yet been swept.
	heap_live uint64

	// heap_scan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heap_live), but omitting
	// no-scan objects and no-scan tails of objects.
	heap_scan uint64

	// heap_marked is the number of bytes marked by the previous
	// GC. After mark termination, heap_live == heap_marked, but
	// unlike heap_live, heap_marked does not change until the
	// next mark termination.
	heap_marked uint64

	// heap_reachable is an estimate of the reachable heap bytes
	// at the end of the previous GC.
	heap_reachable uint64
}

var memstats mstats

// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// General statistics.
	Alloc      uint64 // bytes allocated and not yet freed
	TotalAlloc uint64 // bytes allocated (even if freed)
	Sys        uint64 // bytes obtained from system (sum of XxxSys below)
	Lookups    uint64 // number of pointer lookups
	Mallocs    uint64 // number of mallocs
	Frees      uint64 // number of frees

	// Main allocation heap statistics.
	HeapAlloc    uint64 // bytes allocated and not yet freed (same as Alloc above)
	HeapSys      uint64 // bytes obtained from system
	HeapIdle     uint64 // bytes in idle spans
	HeapInuse    uint64 // bytes in non-idle spans
	HeapReleased uint64 // bytes released to the OS
	HeapObjects  uint64 // total number of allocated objects

	// Low-level fixed-size structure allocator statistics.
	//	Inuse is bytes used now.
	//	Sys is bytes obtained from system.
	StackInuse  uint64 // bytes used by stack allocator
	StackSys    uint64
	MSpanInuse  uint64 // mspan structures
	MSpanSys    uint64
	MCacheInuse uint64 // mcache structures
	MCacheSys   uint64
	BuckHashSys uint64 // profiling bucket hash table
	GCSys       uint64 // GC metadata
	OtherSys    uint64 // other system allocations

	// Garbage collector statistics.
	NextGC        uint64 // next collection will happen when HeapAlloc ≥ this amount
	LastGC        uint64 // end time of last collection (nanoseconds since 1970)
	PauseTotalNs  uint64
	PauseNs       [256]uint64 // circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]
	PauseEnd      [256]uint64 // circular buffer of recent GC pause end times
	NumGC         uint32
	GCCPUFraction float64 // fraction of CPU time used by GC
	EnableGC      bool
	DebugGC       bool

	// Per-size allocation statistics.
	// 61 is NumSizeClasses in the C code.
	BySize [61]struct {
		Size    uint32
		Mallocs uint64
		Frees   uint64
	}
}
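
// For example, given a MemStats value m filled in by ReadMemStats below,
// the most recent pause duration and its end time can be read from the
// circular buffers like so:
//
//	i := (m.NumGC + 255) % 256
//	recentPause, recentEnd := m.PauseNs[i], m.PauseEnd[i]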

// Size of the trailing by_size array differs between Go and C,
// and all data after by_size is local to runtime, not exported.
// NumSizeClasses was changed, but we cannot change the Go struct
// for backward-compatibility reasons.
// sizeof_C_MStats is what C thinks the size of the Go struct is.
var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])

func init() {
	var memStats MemStats
	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
		throw("MStats vs MemStatsType size mismatch")
	}
}

// ReadMemStats populates m with memory allocator statistics.
func ReadMemStats(m *MemStats) {
	stopTheWorld("read mem stats")

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld()
}
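
// A minimal usage sketch from client code (nothing beyond the exported
// API above is assumed):
//
//	var m runtime.MemStats
//	runtime.ReadMemStats(&m)
//	// m is a consistent snapshot: the world was stopped while it was filled in.
//	fmt.Println(m.HeapAlloc, m.NumGC)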

func readmemstats_m(stats *MemStats) {
	updatememstats(nil)

	// Size of the trailing by_size array differs between Go and C.
	// NumSizeClasses was changed, but we cannot change the Go struct
	// for backward-compatibility reasons.
	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)

	// Stack numbers are part of the heap numbers; separate those out
	// for user consumption.
	stats.StackSys += stats.StackInuse
	stats.HeapInuse -= stats.StackInuse
	stats.HeapSys -= stats.StackInuse
}
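
// For example, if the runtime obtained 64 MB for the heap and 1 MB of that
// currently backs goroutine stacks, the adjustment above reports
// StackSys = 1 MB, with HeapSys and HeapInuse each reduced by 1 MB, so
// stack memory is counted exactly once.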

//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}

func readGCStats_m(pauses *[]uint64) {
	p := *pauses
	// Calling code in runtime/debug should make the slice large enough.
	if cap(p) < len(memstats.pause_ns)+3 {
		throw("short slice passed to readGCStats")
	}

	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	lock(&mheap_.lock)

	n := memstats.numgc
	if n > uint32(len(memstats.pause_ns)) {
		n = uint32(len(memstats.pause_ns))
	}

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%len(pause_ns)]; earlier pauses sit at
	// decreasing indices from there. We deliver the times most
	// recent first (in p[0]).
	p = p[:cap(p)]
	for i := uint32(0); i < n; i++ {
		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
		p[i] = memstats.pause_ns[j]
		p[n+i] = memstats.pause_end[j]
	}

	p[n+n] = memstats.last_gc
	p[n+n+1] = uint64(memstats.numgc)
	p[n+n+2] = memstats.pause_total_ns
	unlock(&mheap_.lock)
	*pauses = p[:n+n+3]
}
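
// With n recorded pauses, the slice handed back above is laid out as:
//
//	p[0:n]    pause durations, most recent first
//	p[n:2n]   the matching pause end times, in the same order
//	p[2n:]    last GC end time, total GC count, cumulative pause ns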

//go:nowritebarrier
func updatememstats(stats *gcstats) {
	if stats != nil {
		*stats = gcstats{}
	}
	for mp := allm; mp != nil; mp = mp.alllink {
		if stats != nil {
			src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
			dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
			for i, v := range src {
				dst[i] += v
			}
			mp.gcstats = gcstats{}
		}
	}

	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys

	// Calculate memory allocator stats.
	// During program execution we only count the number of frees and the
	// amount of freed memory. The current number of alive objects in the
	// heap and the amount of alive heap memory are calculated by scanning
	// all spans. The total number of mallocs is calculated as the number
	// of frees plus the number of alive objects. Similarly, the total
	// amount of allocated memory is calculated as the amount of freed
	// memory plus the amount of alive heap memory.
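	//
	// For example, if the span scan finds 3 live 16-byte objects and 2
	// such objects were freed earlier, the scan yields nmalloc = 3 and
	// alloc = 48; adding nfree = 2 then gives nmalloc = 5 and
	// total_alloc = 48 + 32 = 80.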
	memstats.alloc = 0
	memstats.total_alloc = 0
	memstats.nmalloc = 0
	memstats.nfree = 0
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.by_size[i].nmalloc = 0
		memstats.by_size[i].nfree = 0
	}

	// Flush mcaches to mcentral.
	systemstack(flushallmcaches)

	// Aggregate local stats.
	cachestats()

	// Scan all spans and count number of alive objects.
	lock(&mheap_.lock)
	for i := uint32(0); i < mheap_.nspan; i++ {
		s := h_allspans[i]
		if s.state != mSpanInUse {
			continue
		}
		if s.sizeclass == 0 {
			memstats.nmalloc++
			memstats.alloc += uint64(s.elemsize)
		} else {
			memstats.nmalloc += uint64(s.ref)
			memstats.by_size[s.sizeclass].nmalloc += uint64(s.ref)
			memstats.alloc += uint64(s.ref) * uint64(s.elemsize)
		}
	}
	unlock(&mheap_.lock)

	// Aggregate by size class.
	smallfree := uint64(0)
	memstats.nfree = mheap_.nlargefree
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.nfree += mheap_.nsmallfree[i]
		memstats.by_size[i].nfree = mheap_.nsmallfree[i]
		memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
		smallfree += uint64(mheap_.nsmallfree[i]) * uint64(class_to_size[i])
	}
	memstats.nfree += memstats.tinyallocs
	memstats.nmalloc += memstats.nfree

	// Calculate derived stats.
	memstats.total_alloc = uint64(memstats.alloc) + uint64(mheap_.largefree) + smallfree
	memstats.heap_alloc = memstats.alloc
	memstats.heap_objects = memstats.nmalloc - memstats.nfree
}

//go:nowritebarrier
func cachestats() {
	for i := 0; ; i++ {
		p := allp[i]
		if p == nil {
			break
		}
		c := p.mcache
		if c == nil {
			continue
		}
		purgecachedstats(c)
	}
}

//go:nowritebarrier
func flushallmcaches() {
	for i := 0; ; i++ {
		p := allp[i]
		if p == nil {
			break
		}
		c := p.mcache
		if c == nil {
			continue
		}
		c.releaseAll()
		stackcache_clear(c)
	}
}

//go:nosplit
func purgecachedstats(c *mcache) {
	// Protected by either heap or GC lock.
	h := &mheap_
	memstats.heap_live += uint64(c.local_cachealloc)
	c.local_cachealloc = 0
	if trace.enabled {
		traceHeapAlloc()
	}
	memstats.heap_scan += uint64(c.local_scan)
	c.local_scan = 0
	memstats.tinyallocs += uint64(c.local_tinyallocs)
	c.local_tinyallocs = 0
	memstats.nlookup += uint64(c.local_nlookup)
	c.local_nlookup = 0
	h.largefree += uint64(c.local_largefree)
	c.local_largefree = 0
	h.nlargefree += uint64(c.local_nlargefree)
	c.local_nlargefree = 0
	for i := 0; i < len(c.local_nsmallfree); i++ {
		h.nsmallfree[i] += uint64(c.local_nsmallfree[i])
		c.local_nsmallfree[i] = 0
	}
}

// Atomically increases a given *system* memory stat.  We are counting on this
// stat never overflowing a uintptr, so this function must only be used for
// system memory stats.
//
// The current implementation for little-endian architectures is based on
// xadduintptr(), which is less than ideal: xadd64() should really be used.
// Using xadduintptr() is a stop-gap solution until arm supports xadd64() that
// doesn't use locks.  (Locks are a problem as they require a valid G, which
// restricts their usability.)
//
// A side-effect of using xadduintptr() is that we need to check for
// overflow errors.
//go:nosplit
func mSysStatInc(sysStat *uint64, n uintptr) {
	if sys.BigEndian != 0 {
		atomic.Xadd64(sysStat, int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
		print("runtime: stat overflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}
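
// The overflow check relies on unsigned wraparound: if the add wraps, the
// new value is smaller than the addend. A self-contained sketch of the same
// idea using only the standard library (sync/atomic rather than the runtime
// internals above):
//
//	var stat uintptr
//	if val := atomic.AddUintptr(&stat, n); val < n {
//		panic("stat overflow")
//	}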

// Atomically decreases a given *system* memory stat.  Same comments as
// mSysStatInc apply.
//go:nosplit
func mSysStatDec(sysStat *uint64, n uintptr) {
	if sys.BigEndian != 0 {
		atomic.Xadd64(sysStat, -int64(n))
		return
	}
	// val is the new value, so val+n reconstructs the old one; val+n < n
	// means the stat was smaller than n before the subtraction (underflow).
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
		print("runtime: stat underflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}