github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/runtime/mstats.go

// Copyright 2009 The Go Authors.  All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory statistics

package runtime

import "unsafe"

// Statistics.
// If you edit this structure, also edit type MemStats below.
type mstats struct {
	// General statistics.
	alloc       uint64 // bytes allocated and not yet freed
	total_alloc uint64 // bytes allocated (even if freed)
	sys         uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	nlookup     uint64 // number of pointer lookups
	nmalloc     uint64 // number of mallocs
	nfree       uint64 // number of frees

	// Statistics about malloc heap.
	// protected by mheap.lock
	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
	heap_sys      uint64 // bytes obtained from system
	heap_idle     uint64 // bytes in idle spans
	heap_inuse    uint64 // bytes in non-idle spans
	heap_released uint64 // bytes released to the os
	heap_objects  uint64 // total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	stacks_inuse uint64 // this number is included in heap_inuse above
	stacks_sys   uint64 // always 0 in mstats
	mspan_inuse  uint64 // mspan structures
	mspan_sys    uint64
	mcache_inuse uint64 // mcache structures
	mcache_sys   uint64
	buckhash_sys uint64 // profiling bucket hash table
	gc_sys       uint64
	other_sys    uint64

	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	next_gc         uint64 // next gc (in heap_alloc time)
	last_gc         uint64 // last gc (in absolute time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	gc_cpu_fraction float64 // fraction of CPU time used by GC
	enablegc        bool
	debuggc         bool

	// Statistics about allocation size classes.

	by_size [_NumSizeClasses]struct {
		size    uint32
		nmalloc uint64
		nfree   uint64
	}

	// Statistics below here are not exported to Go directly.

	tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to Go directly

	// heap_live is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heap_live <= heap_alloc, since heap_live
	// excludes unmarked objects that have not yet been swept.
	heap_live uint64

	// heap_scan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heap_live), but omitting
	// no-scan objects and no-scan tails of objects.
	heap_scan uint64

	// heap_marked is the number of bytes marked by the previous
	// GC. After mark termination, heap_live == heap_marked, but
	// unlike heap_live, heap_marked does not change until the
	// next mark termination.
	heap_marked uint64

	// heap_reachable is an estimate of the reachable heap bytes
	// at the end of the previous GC.
	heap_reachable uint64
}

var memstats mstats

// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// General statistics.
	Alloc      uint64 // bytes allocated and not yet freed
	TotalAlloc uint64 // bytes allocated (even if freed)
	Sys        uint64 // bytes obtained from system (sum of XxxSys below)
	Lookups    uint64 // number of pointer lookups
	Mallocs    uint64 // number of mallocs
	Frees      uint64 // number of frees

	// Main allocation heap statistics.
	HeapAlloc    uint64 // bytes allocated and not yet freed (same as Alloc above)
	HeapSys      uint64 // bytes obtained from system
	HeapIdle     uint64 // bytes in idle spans
	HeapInuse    uint64 // bytes in non-idle spans
	HeapReleased uint64 // bytes released to the OS
	HeapObjects  uint64 // total number of allocated objects

	// Low-level fixed-size structure allocator statistics.
	//	Inuse is bytes used now.
	//	Sys is bytes obtained from system.
	StackInuse  uint64 // bytes used by stack allocator
	StackSys    uint64
	MSpanInuse  uint64 // mspan structures
	MSpanSys    uint64
	MCacheInuse uint64 // mcache structures
	MCacheSys   uint64
	BuckHashSys uint64 // profiling bucket hash table
	GCSys       uint64 // GC metadata
	OtherSys    uint64 // other system allocations

	// Garbage collector statistics.
	NextGC        uint64 // next collection will happen when HeapAlloc ≥ this amount
	LastGC        uint64 // end time of last collection (nanoseconds since 1970)
	PauseTotalNs  uint64
	PauseNs       [256]uint64 // circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]
	PauseEnd      [256]uint64 // circular buffer of recent GC pause end times
	NumGC         uint32
	GCCPUFraction float64 // fraction of CPU time used by GC
	EnableGC      bool
	DebugGC       bool

	// Per-size allocation statistics.
	// 61 is NumSizeClasses in the C code.
	BySize [61]struct {
		Size    uint32
		Mallocs uint64
		Frees   uint64
	}
}

// Size of the trailing by_size array differs between Go and C,
// and all data after by_size is local to runtime, not exported.
// NumSizeClasses was changed, but we cannot change the Go struct
// because of backward compatibility.
// sizeof_C_MStats is what C thinks the size of the Go struct is.
var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])

func init() {
	var memStats MemStats
	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
		throw("MStats vs MemStatsType size mismatch")
	}
}
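
// The init check above guards the raw copy in readmemstats_m below: the
// runtime copies mstats into MemStats byte-for-byte, so the two structs
// must stay layout-compatible up through by_size. A minimal sketch of the
// same technique in an ordinary package (the struct names here are
// hypothetical, not part of the runtime):
//
//	package main
//
//	import "unsafe"
//
//	type internal struct {
//		a, b  uint64
//		extra uint32 // fields past here are not exported
//	}
//
//	type exported struct{ A, B uint64 }
//
//	func init() {
//		// The exported mirror must match the internal layout up to the
//		// fields it exposes, or a raw copy would produce garbage.
//		if unsafe.Offsetof(internal{}.extra) != unsafe.Sizeof(exported{}) {
//			panic("internal vs exported size mismatch")
//		}
//	}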

// ReadMemStats populates m with memory allocator statistics.
func ReadMemStats(m *MemStats) {
	stopTheWorld("read mem stats")

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld()
}
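
// An illustrative sketch (not part of this file) of how a user program
// consumes these statistics, including the circular PauseNs buffer whose
// most recent entry lives at index (NumGC+255)%256:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		var m runtime.MemStats
//		runtime.ReadMemStats(&m) // stops the world briefly
//		fmt.Printf("heap alloc: %d bytes, sys: %d bytes\n", m.HeapAlloc, m.Sys)
//		if m.NumGC > 0 {
//			// Index the circular buffer for the most recent pause.
//			fmt.Printf("last GC pause: %d ns\n", m.PauseNs[(m.NumGC+255)%256])
//		}
//	}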

func readmemstats_m(stats *MemStats) {
	updatememstats(nil)

	// Size of the trailing by_size array differs between Go and C.
	// NumSizeClasses was changed, but we cannot change the Go struct
	// because of backward compatibility.
	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)

	// Stack numbers are part of the heap numbers; separate them out
	// for user consumption.
	stats.StackSys += stats.StackInuse
	stats.HeapInuse -= stats.StackInuse
	stats.HeapSys -= stats.StackInuse
}

//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}

func readGCStats_m(pauses *[]uint64) {
	p := *pauses
	// Calling code in runtime/debug should make the slice large enough.
	if cap(p) < len(memstats.pause_ns)+3 {
		throw("short slice passed to readGCStats")
	}

	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	lock(&mheap_.lock)

	n := memstats.numgc
	if n > uint32(len(memstats.pause_ns)) {
		n = uint32(len(memstats.pause_ns))
	}

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%len(pause_ns)], and earlier pauses are at
	// successively lower indices, wrapping around. We deliver the
	// times most recent first (in p[0]).
	p = p[:cap(p)]
	for i := uint32(0); i < n; i++ {
		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
		p[i] = memstats.pause_ns[j]
		p[n+i] = memstats.pause_end[j]
	}

	p[n+n] = memstats.last_gc
	p[n+n+1] = uint64(memstats.numgc)
	p[n+n+2] = memstats.pause_total_ns
	unlock(&mheap_.lock)
	*pauses = p[:n+n+3]
}
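
// The slice layout produced above ([pauses..., ends..., last_gc, numgc,
// pause_total]) is unpacked by runtime/debug. A sketch of how a user
// program reaches this code path through the public API (nothing here is
// runtime-internal):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime/debug"
//	)
//
//	func main() {
//		var s debug.GCStats
//		debug.ReadGCStats(&s) // reaches readGCStats via go:linkname
//		fmt.Println("collections:", s.NumGC)
//		fmt.Println("total pause:", s.PauseTotal)
//		if len(s.Pause) > 0 {
//			fmt.Println("most recent pause:", s.Pause[0]) // most recent first
//		}
//	}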

//go:nowritebarrier
func updatememstats(stats *gcstats) {
	if stats != nil {
		*stats = gcstats{}
	}
	for mp := allm; mp != nil; mp = mp.alllink {
		if stats != nil {
			src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
			dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
			for i, v := range src {
				dst[i] += v
			}
			mp.gcstats = gcstats{}
		}
	}

	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys

	// Calculate memory allocator stats.
	// During program execution we only count number of frees and amount of freed memory.
	// Current number of alive objects in the heap and amount of alive heap memory
	// are calculated by scanning all spans.
	// Total number of mallocs is calculated as number of frees plus number of alive objects.
	// Similarly, total amount of allocated memory is calculated as amount of freed memory
	// plus amount of alive heap memory.
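	// For example (illustrative numbers, not taken from a real run):
	// 100 frees recorded so far plus 40 objects found alive by the span
	// scan below yields nmalloc == 140.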
	memstats.alloc = 0
	memstats.total_alloc = 0
	memstats.nmalloc = 0
	memstats.nfree = 0
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.by_size[i].nmalloc = 0
		memstats.by_size[i].nfree = 0
	}

	// Flush MCaches to MCentral.
	systemstack(flushallmcaches)

	// Aggregate local stats.
	cachestats()

	// Scan all spans and count number of alive objects.
	lock(&mheap_.lock)
	for i := uint32(0); i < mheap_.nspan; i++ {
		s := h_allspans[i]
		if s.state != mSpanInUse {
			continue
		}
		if s.sizeclass == 0 {
			memstats.nmalloc++
			memstats.alloc += uint64(s.elemsize)
		} else {
			memstats.nmalloc += uint64(s.ref)
			memstats.by_size[s.sizeclass].nmalloc += uint64(s.ref)
			memstats.alloc += uint64(s.ref) * uint64(s.elemsize)
		}
	}
	unlock(&mheap_.lock)

	// Aggregate by size class.
	smallfree := uint64(0)
	memstats.nfree = mheap_.nlargefree
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.nfree += mheap_.nsmallfree[i]
		memstats.by_size[i].nfree = mheap_.nsmallfree[i]
		memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
		smallfree += uint64(mheap_.nsmallfree[i]) * uint64(class_to_size[i])
	}
	memstats.nfree += memstats.tinyallocs
	memstats.nmalloc += memstats.nfree

	// Calculate derived stats.
	memstats.total_alloc = uint64(memstats.alloc) + uint64(mheap_.largefree) + smallfree
	memstats.heap_alloc = memstats.alloc
	memstats.heap_objects = memstats.nmalloc - memstats.nfree
}

//go:nowritebarrier
func cachestats() {
	for i := 0; ; i++ {
		p := allp[i]
		if p == nil {
			break
		}
		c := p.mcache
		if c == nil {
			continue
		}
		purgecachedstats(c)
	}
}

//go:nowritebarrier
func flushallmcaches() {
	for i := 0; ; i++ {
		p := allp[i]
		if p == nil {
			break
		}
		c := p.mcache
		if c == nil {
			continue
		}
		mCache_ReleaseAll(c)
		stackcache_clear(c)
	}
}

//go:nosplit
func purgecachedstats(c *mcache) {
	// Protected by either heap or GC lock.
	h := &mheap_
	memstats.heap_live += uint64(c.local_cachealloc)
	c.local_cachealloc = 0
	if trace.enabled {
		traceHeapAlloc()
	}
	memstats.heap_scan += uint64(c.local_scan)
	c.local_scan = 0
	memstats.tinyallocs += uint64(c.local_tinyallocs)
	c.local_tinyallocs = 0
	memstats.nlookup += uint64(c.local_nlookup)
	c.local_nlookup = 0
	h.largefree += uint64(c.local_largefree)
	c.local_largefree = 0
	h.nlargefree += uint64(c.local_nlargefree)
	c.local_nlargefree = 0
	for i := 0; i < len(c.local_nsmallfree); i++ {
		h.nsmallfree[i] += uint64(c.local_nsmallfree[i])
		c.local_nsmallfree[i] = 0
	}
}

// Atomically increases a given *system* memory stat.  We are counting on this
// stat never overflowing a uintptr, so this function must only be used for
// system memory stats.
//
// The current implementation for little-endian architectures is based on
// xadduintptr(), which is less than ideal: xadd64() should really be used.
// Using xadduintptr() is a stop-gap solution until arm supports an xadd64()
// that doesn't use locks.  (Locks are a problem as they require a valid G,
// which restricts their usability.)
//
// A side effect of using xadduintptr() is that we need to check for
// overflow errors.
//go:nosplit
func mSysStatInc(sysStat *uint64, n uintptr) {
	if _BigEndian != 0 {
		xadd64(sysStat, int64(n))
		return
	}
	if val := xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
		print("runtime: stat overflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}

// Atomically decreases a given *system* memory stat.  Same comments as
// mSysStatInc apply.
//go:nosplit
func mSysStatDec(sysStat *uint64, n uintptr) {
	if _BigEndian != 0 {
		xadd64(sysStat, -int64(n))
		return
	}
	if val := xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
		print("runtime: stat underflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}
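
// The overflow check above works because xadduintptr returns the new value:
// if adding n wrapped the counter past the top of a uintptr, the result is
// necessarily smaller than n. The same trick in portable Go using
// sync/atomic (a sketch on a plain uintptr counter; the names here are
// hypothetical, not runtime API):
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	var sysStat uintptr
//
//	// sysStatInc adds n to sysStat and aborts on wraparound, mirroring
//	// the little-endian path of mSysStatInc.
//	func sysStatInc(n uintptr) {
//		if val := atomic.AddUintptr(&sysStat, n); val < n {
//			panic(fmt.Sprintf("stat overflow: val %d, n %d", val, n))
//		}
//	}
//
//	func main() {
//		sysStatInc(4096)
//		fmt.Println(sysStat) // 4096
//	}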