github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/runtime/mstats.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory statistics

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Statistics.
// If you edit this structure, also edit type MemStats below.
// Their layouts must match exactly.
//
// For detailed descriptions see the documentation for MemStats.
// Fields that differ from MemStats are further documented here.
//
// Many of these fields are updated on the fly, while others are only
// updated when updatememstats is called.
type mstats struct {
	// General statistics.
	alloc       uint64 // bytes allocated and not yet freed
	total_alloc uint64 // bytes allocated (even if freed)
	sys         uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	nlookup     uint64 // number of pointer lookups (unused)
	nmalloc     uint64 // number of mallocs
	nfree       uint64 // number of frees

	// Statistics about malloc heap.
	// Protected by mheap.lock
	//
	// Like MemStats, heap_sys and heap_inuse do not count memory
	// in manually-managed spans.
	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
	heap_sys      uint64 // virtual address space obtained from system for GC'd heap
	heap_idle     uint64 // bytes in idle spans
	heap_inuse    uint64 // bytes in mSpanInUse spans
	heap_released uint64 // bytes released to the os
	heap_objects  uint64 // total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	stacks_inuse uint64 // bytes in manually-managed stack spans
	stacks_sys   uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
	mspan_inuse  uint64 // mspan structures
	mspan_sys    uint64
	mcache_inuse uint64 // mcache structures
	mcache_sys   uint64
	buckhash_sys uint64 // profiling bucket hash table
	gc_sys       uint64
	other_sys    uint64

	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	next_gc         uint64 // goal heap_live for when next GC ends; ^0 if disabled
	last_gc_unix    uint64 // last gc (in unix time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	numforcedgc     uint32  // number of user-forced GCs
	gc_cpu_fraction float64 // fraction of CPU time used by GC
	enablegc        bool
	debuggc         bool

	// Statistics about allocation size classes.

	by_size [_NumSizeClasses]struct {
		size    uint32
		nmalloc uint64
		nfree   uint64
	}

	// Statistics below here are not exported to MemStats directly.

	last_gc_nanotime uint64 // last gc (monotonic time)
	tinyallocs       uint64 // number of tiny allocations that didn't cause actual allocation; not exported to go directly

	// triggerRatio is the heap growth ratio that triggers marking.
	//
	// E.g., if this is 0.6, then GC should start when the live
	// heap has reached 1.6 times the heap size marked by the
	// previous cycle. This should be ≤ GOGC/100 so the trigger
	// heap size is less than the goal heap size. This is set
	// during mark termination for the next cycle's trigger.
	triggerRatio float64
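
	// Illustrative note (not part of the original source): with the
	// numbers used in the comment above, if the previous cycle left
	// heap_marked at 100 MiB and triggerRatio is 0.6, the next mark
	// phase would start once the live heap grows to roughly
	// 100 MiB * (1 + 0.6) = 160 MiB. See gc_trigger below, which is
	// derived from this ratio at mark termination.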

	// gc_trigger is the heap size that triggers marking.
	//
	// When heap_live ≥ gc_trigger, the mark phase will start.
	// This is also the heap size by which proportional sweeping
	// must be complete.
	//
	// This is computed from triggerRatio during mark termination
	// for the next cycle's trigger.
	gc_trigger uint64

	// heap_live is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heap_live <= heap_alloc, since heap_alloc
	// includes unmarked objects that have not yet been swept (and
	// hence goes up as we allocate and down as we sweep) while
	// heap_live excludes these objects (and hence only goes up
	// between GCs).
	//
	// This is updated atomically without locking. To reduce
	// contention, this is updated only when obtaining a span from
	// an mcentral and at this point it counts all of the
	// unallocated slots in that span (which will be allocated
	// before that mcache obtains another span from that
	// mcentral). Hence, it slightly overestimates the "true" live
	// heap size. It's better to overestimate than to
	// underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this
	// leads to a conservative GC rate rather than a GC rate that
	// is potentially too low.
	//
	// Reads should likewise be atomic (or during STW).
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// gcController.revise().
	heap_live uint64

	// heap_scan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heap_live), but omitting
	// no-scan objects and no-scan tails of objects.
	//
	// Whenever this is updated, call gcController.revise().
	heap_scan uint64

	// heap_marked is the number of bytes marked by the previous
	// GC. After mark termination, heap_live == heap_marked, but
	// unlike heap_live, heap_marked does not change until the
	// next mark termination.
	heap_marked uint64
}

var memstats mstats

// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// General statistics.

	// Alloc is bytes of allocated heap objects.
	//
	// This is the same as HeapAlloc (see below).
	Alloc uint64

	// TotalAlloc is cumulative bytes allocated for heap objects.
	//
	// TotalAlloc increases as heap objects are allocated, but
	// unlike Alloc and HeapAlloc, it does not decrease when
	// objects are freed.
	TotalAlloc uint64

	// Sys is the total bytes of memory obtained from the OS.
	//
	// Sys is the sum of the XSys fields below. Sys measures the
	// virtual address space reserved by the Go runtime for the
	// heap, stacks, and other internal data structures. It's
	// likely that not all of the virtual address space is backed
	// by physical memory at any given moment, though in general
	// it all was at some point.
	Sys uint64

	// Lookups is the number of pointer lookups performed by the
	// runtime.
	//
	// This is primarily useful for debugging runtime internals.
	Lookups uint64

	// Mallocs is the cumulative count of heap objects allocated.
	// The number of live objects is Mallocs - Frees.
	Mallocs uint64

	// Frees is the cumulative count of heap objects freed.
	Frees uint64
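
	// Illustrative note (not part of the original source): given a
	// MemStats value m filled in by ReadMemStats, the number of
	// currently live heap objects follows directly from the two
	// cumulative counters above:
	//
	//	liveObjects := m.Mallocs - m.Frees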

	// Heap memory statistics.
	//
	// Interpreting the heap statistics requires some knowledge of
	// how Go organizes memory. Go divides the virtual address
	// space of the heap into "spans", which are contiguous
	// regions of memory 8K or larger. A span may be in one of
	// three states:
	//
	// An "idle" span contains no objects or other data. The
	// physical memory backing an idle span can be released back
	// to the OS (but the virtual address space never is), or it
	// can be converted into an "in use" or "stack" span.
	//
	// An "in use" span contains at least one heap object and may
	// have free space available to allocate more heap objects.
	//
	// A "stack" span is used for goroutine stacks. Stack spans
	// are not considered part of the heap. A span can change
	// between heap and stack memory; it is never used for both
	// simultaneously.

	// HeapAlloc is bytes of allocated heap objects.
	//
	// "Allocated" heap objects include all reachable objects, as
	// well as unreachable objects that the garbage collector has
	// not yet freed. Specifically, HeapAlloc increases as heap
	// objects are allocated and decreases as the heap is swept
	// and unreachable objects are freed. Sweeping occurs
	// incrementally between GC cycles, so these two processes
	// occur simultaneously, and as a result HeapAlloc tends to
	// change smoothly (in contrast with the sawtooth that is
	// typical of stop-the-world garbage collectors).
	HeapAlloc uint64

	// HeapSys is bytes of heap memory obtained from the OS.
	//
	// HeapSys measures the amount of virtual address space
	// reserved for the heap. This includes virtual address space
	// that has been reserved but not yet used, which consumes no
	// physical memory, but tends to be small, as well as virtual
	// address space for which the physical memory has been
	// returned to the OS after it became unused (see HeapReleased
	// for a measure of the latter).
	//
	// HeapSys estimates the largest size the heap has had.
	HeapSys uint64

	// HeapIdle is bytes in idle (unused) spans.
	//
	// Idle spans have no objects in them. These spans could be
	// (and may already have been) returned to the OS, or they can
	// be reused for heap allocations, or they can be reused as
	// stack memory.
	//
	// HeapIdle minus HeapReleased estimates the amount of memory
	// that could be returned to the OS, but is being retained by
	// the runtime so it can grow the heap without requesting more
	// memory from the OS. If this difference is significantly
	// larger than the heap size, it indicates there was a recent
	// transient spike in live heap size.
	HeapIdle uint64

	// HeapInuse is bytes in in-use spans.
	//
	// In-use spans have at least one object in them. These spans
	// can only be used for other objects of roughly the same
	// size.
	//
	// HeapInuse minus HeapAlloc estimates the amount of memory
	// that has been dedicated to particular size classes, but is
	// not currently being used. This is an upper bound on
	// fragmentation, but in general this memory can be reused
	// efficiently.
	HeapInuse uint64
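
	// Illustrative note (not part of the original source): the two
	// derived quantities described above can be computed from a
	// MemStats value m as
	//
	//	retained := m.HeapIdle - m.HeapReleased   // reusable heap memory held without asking the OS again
	//	fragmentation := m.HeapInuse - m.HeapAlloc // upper bound on size-class fragmentation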

	// HeapReleased is bytes of physical memory returned to the OS.
	//
	// This counts heap memory from idle spans that was returned
	// to the OS and has not yet been reacquired for the heap.
	HeapReleased uint64

	// HeapObjects is the number of allocated heap objects.
	//
	// Like HeapAlloc, this increases as objects are allocated and
	// decreases as the heap is swept and unreachable objects are
	// freed.
	HeapObjects uint64

	// Stack memory statistics.
	//
	// Stacks are not considered part of the heap, but the runtime
	// can reuse a span of heap memory for stack memory, and
	// vice-versa.

	// StackInuse is bytes in stack spans.
	//
	// In-use stack spans have at least one stack in them. These
	// spans can only be used for other stacks of the same size.
	//
	// There is no StackIdle because unused stack spans are
	// returned to the heap (and hence counted toward HeapIdle).
	StackInuse uint64

	// StackSys is bytes of stack memory obtained from the OS.
	//
	// StackSys is StackInuse, plus any memory obtained directly
	// from the OS for OS thread stacks (which should be minimal).
	StackSys uint64

	// Off-heap memory statistics.
	//
	// The following statistics measure runtime-internal
	// structures that are not allocated from heap memory (usually
	// because they are part of implementing the heap). Unlike
	// heap or stack memory, any memory allocated to these
	// structures is dedicated to these structures.
	//
	// These are primarily useful for debugging runtime memory
	// overheads.

	// MSpanInuse is bytes of allocated mspan structures.
	MSpanInuse uint64

	// MSpanSys is bytes of memory obtained from the OS for mspan
	// structures.
	MSpanSys uint64

	// MCacheInuse is bytes of allocated mcache structures.
	MCacheInuse uint64

	// MCacheSys is bytes of memory obtained from the OS for
	// mcache structures.
	MCacheSys uint64

	// BuckHashSys is bytes of memory in profiling bucket hash tables.
	BuckHashSys uint64

	// GCSys is bytes of memory in garbage collection metadata.
	GCSys uint64

	// OtherSys is bytes of memory in miscellaneous off-heap
	// runtime allocations.
	OtherSys uint64

	// Garbage collector statistics.

	// NextGC is the target heap size of the next GC cycle.
	//
	// The garbage collector's goal is to keep HeapAlloc ≤ NextGC.
	// At the end of each GC cycle, the target for the next cycle
	// is computed based on the amount of reachable data and the
	// value of GOGC.
	NextGC uint64

	// LastGC is the time the last garbage collection finished, as
	// nanoseconds since 1970 (the UNIX epoch).
	LastGC uint64

	// PauseTotalNs is the cumulative nanoseconds in GC
	// stop-the-world pauses since the program started.
	//
	// During a stop-the-world pause, all goroutines are paused
	// and only the garbage collector can run.
	PauseTotalNs uint64

	// PauseNs is a circular buffer of recent GC stop-the-world
	// pause times in nanoseconds.
	//
	// The most recent pause is at PauseNs[(NumGC+255)%256]. In
	// general, PauseNs[N%256] records the time paused in the most
	// recent N%256th GC cycle. There may be multiple pauses per
	// GC cycle; this is the sum of all pauses during a cycle.
	PauseNs [256]uint64
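
	// Illustrative note (not part of the original source): using the
	// indexing rule above, the total pause time of the most recently
	// completed cycle in a MemStats value m is
	//
	//	lastPause := m.PauseNs[(m.NumGC+255)%256]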

	// PauseEnd is a circular buffer of recent GC pause end times,
	// as nanoseconds since 1970 (the UNIX epoch).
	//
	// This buffer is filled the same way as PauseNs. There may be
	// multiple pauses per GC cycle; this records the end of the
	// last pause in a cycle.
	PauseEnd [256]uint64

	// NumGC is the number of completed GC cycles.
	NumGC uint32

	// NumForcedGC is the number of GC cycles that were forced by
	// the application calling the GC function.
	NumForcedGC uint32

	// GCCPUFraction is the fraction of this program's available
	// CPU time used by the GC since the program started.
	//
	// GCCPUFraction is expressed as a number between 0 and 1,
	// where 0 means GC has consumed none of this program's CPU. A
	// program's available CPU time is defined as the integral of
	// GOMAXPROCS since the program started. That is, if
	// GOMAXPROCS is 2 and a program has been running for 10
	// seconds, its "available CPU" is 20 seconds. GCCPUFraction
	// does not include CPU time used for write barrier activity.
	//
	// This is the same as the fraction of CPU reported by
	// GODEBUG=gctrace=1.
	GCCPUFraction float64

	// EnableGC indicates that GC is enabled. It is always true,
	// even if GOGC=off.
	EnableGC bool

	// DebugGC is currently unused.
	DebugGC bool

	// BySize reports per-size class allocation statistics.
	//
	// BySize[N] gives statistics for allocations of size S where
	// BySize[N-1].Size < S ≤ BySize[N].Size.
	//
	// This does not report allocations larger than BySize[60].Size.
	BySize [61]struct {
		// Size is the maximum byte size of an object in this
		// size class.
		Size uint32

		// Mallocs is the cumulative count of heap objects
		// allocated in this size class. The cumulative bytes
		// of allocation is Size*Mallocs. The number of live
		// objects in this size class is Mallocs - Frees.
		Mallocs uint64

		// Frees is the cumulative count of heap objects freed
		// in this size class.
		Frees uint64
	}
}

// Size of the trailing by_size array differs between mstats and MemStats,
// and all data after by_size is local to runtime, not exported.
// NumSizeClasses was changed, but we cannot change MemStats because of backward compatibility.
// sizeof_C_MStats is the size of the prefix of mstats that
// corresponds to MemStats. It should match Sizeof(MemStats{}).
var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])

func init() {
	var memStats MemStats
	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
		throw("MStats vs MemStatsType size mismatch")
	}

	if unsafe.Offsetof(memstats.heap_live)%8 != 0 {
		println(unsafe.Offsetof(memstats.heap_live))
		throw("memstats.heap_live not aligned to 8 bytes")
	}
}

// ReadMemStats populates m with memory allocator statistics.
//
// The returned memory allocator statistics are up to date as of the
// call to ReadMemStats. This is in contrast with a heap profile,
// which is a snapshot as of the most recently completed garbage
// collection cycle.
func ReadMemStats(m *MemStats) {
	stopTheWorld("read mem stats")

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld()
}
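
// Illustrative note (not part of the original source): from user code the
// function above is typically used as shown below; because it stops the
// world, it should not be called on a hot path.
//
//	var m runtime.MemStats
//	runtime.ReadMemStats(&m)
//	fmt.Printf("heap in use: %d B, next GC target: %d B\n", m.HeapInuse, m.NextGC)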

func readmemstats_m(stats *MemStats) {
	updatememstats()

	// The size of the trailing by_size array differs between
	// mstats and MemStats. NumSizeClasses was changed, but we
	// cannot change MemStats because of backward compatibility.
	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)

	// memstats.stacks_sys is only memory mapped directly for OS stacks.
	// Add in heap-allocated stack memory for user consumption.
	stats.StackSys += stats.StackInuse
}

//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}

func readGCStats_m(pauses *[]uint64) {
	p := *pauses
	// Calling code in runtime/debug should make the slice large enough.
	if cap(p) < len(memstats.pause_ns)+3 {
		throw("short slice passed to readGCStats")
	}

	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	lock(&mheap_.lock)

	n := memstats.numgc
	if n > uint32(len(memstats.pause_ns)) {
		n = uint32(len(memstats.pause_ns))
	}

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%len(pause_ns)], and then backward
	// from there to go back farther in time. We deliver the times
	// most recent first (in p[0]).
	p = p[:cap(p)]
	for i := uint32(0); i < n; i++ {
		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
		p[i] = memstats.pause_ns[j]
		p[n+i] = memstats.pause_end[j]
	}

	p[n+n] = memstats.last_gc_unix
	p[n+n+1] = uint64(memstats.numgc)
	p[n+n+2] = memstats.pause_total_ns
	unlock(&mheap_.lock)
	*pauses = p[:n+n+3]
}
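
// Illustrative note (not part of the original source): the slice returned by
// readGCStats_m above holds n pause durations (most recent first), then the
// n matching pause end times, then three trailing words. A consumer such as
// runtime/debug decodes it roughly as
//
//	n := (len(p) - 3) / 2
//	pauses, pauseEnds := p[:n], p[n:2*n]
//	lastGC, numGC, totalPause := p[2*n], p[2*n+1], p[2*n+2]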

//go:nowritebarrier
func updatememstats() {
	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys

	// We also count stacks_inuse as sys memory.
	memstats.sys += memstats.stacks_inuse

	// Calculate memory allocator stats.
	// During program execution we only count number of frees and amount of freed memory.
	// Current number of alive objects in the heap and amount of alive heap memory
	// are calculated by scanning all spans.
	// Total number of mallocs is calculated as number of frees plus number of alive objects.
	// Similarly, total amount of allocated memory is calculated as amount of freed memory
	// plus amount of alive heap memory.
	memstats.alloc = 0
	memstats.total_alloc = 0
	memstats.nmalloc = 0
	memstats.nfree = 0
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.by_size[i].nmalloc = 0
		memstats.by_size[i].nfree = 0
	}

	// Flush mcaches to mcentral.
	systemstack(flushallmcaches)

	// Aggregate local stats.
	cachestats()

	// Collect allocation stats. This is safe and consistent
	// because the world is stopped.
	var smallFree, totalAlloc, totalFree uint64
	// Collect per-spanclass stats.
	for spc := range mheap_.central {
		// The mcaches are now empty, so mcentral stats are
		// up-to-date.
		c := &mheap_.central[spc].mcentral
		memstats.nmalloc += c.nmalloc
		i := spanClass(spc).sizeclass()
		memstats.by_size[i].nmalloc += c.nmalloc
		totalAlloc += c.nmalloc * uint64(class_to_size[i])
	}
	// Collect per-sizeclass stats.
	for i := 0; i < _NumSizeClasses; i++ {
		if i == 0 {
			memstats.nmalloc += mheap_.nlargealloc
			totalAlloc += mheap_.largealloc
			totalFree += mheap_.largefree
			memstats.nfree += mheap_.nlargefree
			continue
		}

		// The mcache stats have been flushed to mheap_.
		memstats.nfree += mheap_.nsmallfree[i]
		memstats.by_size[i].nfree = mheap_.nsmallfree[i]
		smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
	}
	totalFree += smallFree

	memstats.nfree += memstats.tinyallocs
	memstats.nmalloc += memstats.tinyallocs

	// Calculate derived stats.
	memstats.total_alloc = totalAlloc
	memstats.alloc = totalAlloc - totalFree
	memstats.heap_alloc = memstats.alloc
	memstats.heap_objects = memstats.nmalloc - memstats.nfree
}

// cachestats flushes all mcache stats.
//
// The world must be stopped.
//
//go:nowritebarrier
func cachestats() {
	for _, p := range allp {
		c := p.mcache
		if c == nil {
			continue
		}
		purgecachedstats(c)
	}
}

// flushmcache flushes the mcache of allp[i].
//
// The world must be stopped.
//
//go:nowritebarrier
func flushmcache(i int) {
	p := allp[i]
	c := p.mcache
	if c == nil {
		return
	}
	c.releaseAll()
	stackcache_clear(c)
}

// flushallmcaches flushes the mcaches of all Ps.
//
// The world must be stopped.
//
//go:nowritebarrier
func flushallmcaches() {
	for i := 0; i < int(gomaxprocs); i++ {
		flushmcache(i)
	}
}

//go:nosplit
func purgecachedstats(c *mcache) {
	// Protected by either heap or GC lock.
	h := &mheap_
	memstats.heap_scan += uint64(c.local_scan)
	c.local_scan = 0
	memstats.tinyallocs += uint64(c.local_tinyallocs)
	c.local_tinyallocs = 0
	h.largefree += uint64(c.local_largefree)
	c.local_largefree = 0
	h.nlargefree += uint64(c.local_nlargefree)
	c.local_nlargefree = 0
	for i := 0; i < len(c.local_nsmallfree); i++ {
		h.nsmallfree[i] += uint64(c.local_nsmallfree[i])
		c.local_nsmallfree[i] = 0
	}
}

// Atomically increases a given *system* memory stat. We are counting on this
// stat never overflowing a uintptr, so this function must only be used for
// system memory stats.
//
// The current implementation for little endian architectures is based on
// xadduintptr(), which is less than ideal: xadd64() should really be used.
// Using xadduintptr() is a stop-gap solution until arm supports xadd64() that
// doesn't use locks. (Locks are a problem as they require a valid G, which
// restricts their usability.)
//
// A side-effect of using xadduintptr() is that we need to check for
// overflow errors.
//go:nosplit
func mSysStatInc(sysStat *uint64, n uintptr) {
	if sysStat == nil {
		return
	}
	if sys.BigEndian {
		atomic.Xadd64(sysStat, int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
		print("runtime: stat overflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}
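
// Illustrative note (not part of the original source): callers elsewhere in
// the runtime pass a pointer to one of the xxx_sys fields of memstats when
// they obtain or return memory from the OS, along the lines of the
// hypothetical call site below.
//
//	mSysStatInc(&memstats.heap_sys, n) // hypothetical: n bytes newly mapped for the heap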

// Atomically decreases a given *system* memory stat. Same comments as
// mSysStatInc apply.
//go:nosplit
func mSysStatDec(sysStat *uint64, n uintptr) {
	if sysStat == nil {
		return
	}
	if sys.BigEndian {
		atomic.Xadd64(sysStat, -int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n {
		print("runtime: stat underflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}
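
// Illustrative note (not part of the original source): on the little-endian
// path above, the decrement is performed as a two's-complement addition of
// uintptr(-int64(n)), i.e. an unsigned subtraction that wraps on underflow.
// The check val+n < n detects that wrap: if the old value was at least n,
// then val = old-n and val+n equals old, which is >= n; if the old value was
// smaller than n, the subtraction wraps and val+n comes back around to old,
// which is < n, so the underflow is reported.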