github.com/panjjo/go@v0.0.0-20161104043856-d62b31386338/src/runtime/mstats.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory statistics

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Statistics.
// If you edit this structure, also edit type MemStats below.
// Their layouts must match exactly.
//
// For detailed descriptions see the documentation for MemStats.
// Fields that differ from MemStats are further documented here.
//
// Many of these fields are updated on the fly, while others are only
// updated when updatememstats is called.
type mstats struct {
	// General statistics.
	alloc       uint64 // bytes allocated and not yet freed
	total_alloc uint64 // bytes allocated (even if freed)
	sys         uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	nlookup     uint64 // number of pointer lookups
	nmalloc     uint64 // number of mallocs
	nfree       uint64 // number of frees

	// Statistics about malloc heap.
	// Protected by mheap.lock
	//
	// In mstats, heap_sys and heap_inuse include stack memory,
	// while in MemStats stack memory is separated out from the
	// heap stats.
	heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
	heap_sys      uint64 // virtual address space obtained from system
	heap_idle     uint64 // bytes in idle spans
	heap_inuse    uint64 // bytes in non-idle spans
	heap_released uint64 // bytes released to the OS
	heap_objects  uint64 // total number of allocated objects

	// TODO(austin): heap_released is both useless and inaccurate
	// in its current form. It's useless because, from the user's
	// and OS's perspectives, there's no difference between a page
	// that has not yet been faulted in and a page that has been
	// released back to the OS. We could fix this by considering
	// newly mapped spans to be "released". It's inaccurate
	// because when we split a large span for allocation, we
	// "unrelease" all pages in the large span and not just the
	// ones we split off for use. This is trickier to fix because
	// we currently don't know which pages of a span we've
	// released. We could fix it by separating "free" and
	// "released" spans, but then we have to allocate from runs of
	// free and released spans.

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	stacks_inuse uint64 // this number is included in heap_inuse above; differs from MemStats.StackInuse
	stacks_sys   uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
	mspan_inuse  uint64 // mspan structures
	mspan_sys    uint64
	mcache_inuse uint64 // mcache structures
	mcache_sys   uint64
	buckhash_sys uint64 // profiling bucket hash table
	gc_sys       uint64
	other_sys    uint64

	// Statistics about the garbage collector.
	// Protected by mheap or stopping the world during GC.
	next_gc         uint64 // goal heap_live for when next GC ends; ^0 if disabled
	last_gc         uint64 // last gc (in absolute time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	gc_cpu_fraction float64 // fraction of CPU time used by GC
	enablegc        bool
	debuggc         bool

	// Statistics about allocation size classes.

	by_size [_NumSizeClasses]struct {
		size    uint32
		nmalloc uint64
		nfree   uint64
	}

	// Statistics below here are not exported to MemStats directly.

	tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to go directly

	// gc_trigger is the heap size that triggers marking.
	//
	// When heap_live ≥ gc_trigger, the mark phase will start.
	// This is also the heap size by which proportional sweeping
	// must be complete.
	gc_trigger uint64

	_ uint32 // force 8-byte alignment of heap_live and prevent an alignment check crash on MIPS32.

	// heap_live is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heap_live <= heap_alloc, since heap_alloc
	// includes unmarked objects that have not yet been swept (and
	// hence goes up as we allocate and down as we sweep) while
	// heap_live excludes these objects (and hence only goes up
	// between GCs).
	//
	// This is updated atomically without locking. To reduce
	// contention, this is updated only when obtaining a span from
	// an mcentral and at this point it counts all of the
	// unallocated slots in that span (which will be allocated
	// before that mcache obtains another span from that
	// mcentral). Hence, it slightly overestimates the "true" live
	// heap size. It's better to overestimate than to
	// underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this
	// leads to a conservative GC rate rather than a GC rate that
	// is potentially too low.
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// gcController.revise().
	heap_live uint64

	// heap_scan is the number of bytes of "scannable" heap. This
	// is the live heap (as counted by heap_live), but omitting
	// no-scan objects and no-scan tails of objects.
	//
	// Whenever this is updated, call gcController.revise().
	heap_scan uint64

	// heap_marked is the number of bytes marked by the previous
	// GC. After mark termination, heap_live == heap_marked, but
	// unlike heap_live, heap_marked does not change until the
	// next mark termination.
	heap_marked uint64
}

var memstats mstats

// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// General statistics.

	// Alloc is bytes of allocated heap objects.
	//
	// This is the same as HeapAlloc (see below).
	Alloc uint64

	// TotalAlloc is cumulative bytes allocated for heap objects.
	//
	// TotalAlloc increases as heap objects are allocated, but
	// unlike Alloc and HeapAlloc, it does not decrease when
	// objects are freed.
	TotalAlloc uint64

	// Sys is the total bytes of memory obtained from the OS.
	//
	// Sys is the sum of the XSys fields below.
	// Sys measures the
	// virtual address space reserved by the Go runtime for the
	// heap, stacks, and other internal data structures. It's
	// likely that not all of the virtual address space is backed
	// by physical memory at any given moment, though in general
	// it all was at some point.
	Sys uint64

	// Lookups is the number of pointer lookups performed by the
	// runtime.
	//
	// This is primarily useful for debugging runtime internals.
	Lookups uint64

	// Mallocs is the cumulative count of heap objects allocated.
	Mallocs uint64

	// Frees is the cumulative count of heap objects freed.
	Frees uint64

	// Heap memory statistics.
	//
	// Interpreting the heap statistics requires some knowledge of
	// how Go organizes memory. Go divides the virtual address
	// space of the heap into "spans", which are contiguous
	// regions of memory 8K or larger. A span may be in one of
	// three states:
	//
	// An "idle" span contains no objects or other data. The
	// physical memory backing an idle span can be released back
	// to the OS (but the virtual address space never is), or it
	// can be converted into an "in use" or "stack" span.
	//
	// An "in use" span contains at least one heap object and may
	// have free space available to allocate more heap objects.
	//
	// A "stack" span is used for goroutine stacks. Stack spans
	// are not considered part of the heap. A span can change
	// between heap and stack memory; it is never used for both
	// simultaneously.

	// HeapAlloc is bytes of allocated heap objects.
	//
	// "Allocated" heap objects include all reachable objects, as
	// well as unreachable objects that the garbage collector has
	// not yet freed. Specifically, HeapAlloc increases as heap
	// objects are allocated and decreases as the heap is swept
	// and unreachable objects are freed. Sweeping occurs
	// incrementally between GC cycles, so these two processes
	// occur simultaneously, and as a result HeapAlloc tends to
	// change smoothly (in contrast with the sawtooth that is
	// typical of stop-the-world garbage collectors).
	HeapAlloc uint64

	// HeapSys is bytes of heap memory obtained from the OS.
	//
	// HeapSys measures the amount of virtual address space
	// reserved for the heap. This includes virtual address space
	// that has been reserved but not yet used, which consumes no
	// physical memory, but tends to be small, as well as virtual
	// address space for which the physical memory has been
	// returned to the OS after it became unused (see HeapReleased
	// for a measure of the latter).
	//
	// HeapSys estimates the largest size the heap has had.
	HeapSys uint64

	// HeapIdle is bytes in idle (unused) spans.
	//
	// Idle spans have no objects in them. These spans could be
	// (and may already have been) returned to the OS, or they can
	// be reused for heap allocations, or they can be reused as
	// stack memory.
	//
	// HeapIdle minus HeapReleased estimates the amount of memory
	// that could be returned to the OS, but is being retained by
	// the runtime so it can grow the heap without requesting more
	// memory from the OS. If this difference is significantly
	// larger than the heap size, it indicates there was a recent
	// transient spike in live heap size.
	HeapIdle uint64

	// HeapInuse is bytes in in-use spans.
	//
	// In-use spans have at least one object in them. These spans
	// can only be used for other objects of roughly the same
	// size.
	//
	// HeapInuse minus HeapAlloc estimates the amount of memory
	// that has been dedicated to particular size classes, but is
	// not currently being used. This is an upper bound on
	// fragmentation, but in general this memory can be reused
	// efficiently.
	HeapInuse uint64

	// HeapReleased is bytes of physical memory returned to the OS.
	//
	// This counts heap memory from idle spans that was returned
	// to the OS and has not yet been reacquired for the heap.
	HeapReleased uint64

	// HeapObjects is the number of allocated heap objects.
	//
	// Like HeapAlloc, this increases as objects are allocated and
	// decreases as the heap is swept and unreachable objects are
	// freed.
	HeapObjects uint64

	// Stack memory statistics.
	//
	// Stacks are not considered part of the heap, but the runtime
	// can reuse a span of heap memory for stack memory, and
	// vice-versa.

	// StackInuse is bytes in stack spans.
	//
	// In-use stack spans have at least one stack in them. These
	// spans can only be used for other stacks of the same size.
	//
	// There is no StackIdle because unused stack spans are
	// returned to the heap (and hence counted toward HeapIdle).
	StackInuse uint64

	// StackSys is bytes of stack memory obtained from the OS.
	//
	// StackSys is StackInuse, plus any memory obtained directly
	// from the OS for OS thread stacks (which should be minimal).
	StackSys uint64

	// Off-heap memory statistics.
	//
	// The following statistics measure runtime-internal
	// structures that are not allocated from heap memory (usually
	// because they are part of implementing the heap). Unlike
	// heap or stack memory, any memory allocated to these
	// structures is dedicated to these structures.
	//
	// These are primarily useful for debugging runtime memory
	// overheads.

	// MSpanInuse is bytes of allocated mspan structures.
	MSpanInuse uint64

	// MSpanSys is bytes of memory obtained from the OS for mspan
	// structures.
	MSpanSys uint64

	// MCacheInuse is bytes of allocated mcache structures.
	MCacheInuse uint64

	// MCacheSys is bytes of memory obtained from the OS for
	// mcache structures.
	MCacheSys uint64

	// BuckHashSys is bytes of memory in profiling bucket hash tables.
	BuckHashSys uint64

	// GCSys is bytes of memory in garbage collection metadata.
	GCSys uint64

	// OtherSys is bytes of memory in miscellaneous off-heap
	// runtime allocations.
	OtherSys uint64

	// Garbage collector statistics.

	// NextGC is the target heap size of the next GC cycle.
	//
	// The garbage collector's goal is to keep HeapAlloc ≤ NextGC.
	// At the end of each GC cycle, the target for the next cycle
	// is computed based on the amount of reachable data and the
	// value of GOGC.
	NextGC uint64

	// LastGC is the time the last garbage collection finished, as
	// nanoseconds since 1970 (the UNIX epoch).
	LastGC uint64

	// PauseTotalNs is the cumulative nanoseconds in GC
	// stop-the-world pauses since the program started.
	//
	// During a stop-the-world pause, all goroutines are paused
	// and only the garbage collector can run.
	PauseTotalNs uint64

	// PauseNs is a circular buffer of recent GC stop-the-world
	// pause times in nanoseconds.
	//
	// The most recent pause is at PauseNs[(NumGC+255)%256]. In
	// general, PauseNs[N%256] records the time paused in the most
	// recent N%256th GC cycle. There may be multiple pauses per
	// GC cycle; this is the sum of all pauses during a cycle.
	PauseNs [256]uint64

	// PauseEnd is a circular buffer of recent GC pause end times,
	// as nanoseconds since 1970 (the UNIX epoch).
	//
	// This buffer is filled the same way as PauseNs. There may be
	// multiple pauses per GC cycle; this records the end of the
	// last pause in a cycle.
	PauseEnd [256]uint64

	// NumGC is the number of completed GC cycles.
	NumGC uint32

	// GCCPUFraction is the fraction of this program's available
	// CPU time used by the GC since the program started.
	//
	// GCCPUFraction is expressed as a number between 0 and 1,
	// where 0 means GC has consumed none of this program's CPU. A
	// program's available CPU time is defined as the integral of
	// GOMAXPROCS since the program started. That is, if
	// GOMAXPROCS is 2 and a program has been running for 10
	// seconds, its "available CPU" is 20 seconds. GCCPUFraction
	// does not include CPU time used for write barrier activity.
	//
	// This is the same as the fraction of CPU reported by
	// GODEBUG=gctrace=1.
	GCCPUFraction float64

	// EnableGC indicates that GC is enabled. It is always true,
	// even if GOGC=off.
	EnableGC bool

	// DebugGC is currently unused.
	DebugGC bool

	// BySize reports per-size class allocation statistics.
	//
	// BySize[N] gives statistics for allocations of size S where
	// BySize[N-1].Size < S ≤ BySize[N].Size.
	//
	// This does not report allocations larger than BySize[60].Size.
	BySize [61]struct {
		Size    uint32
		Mallocs uint64
		Frees   uint64
	}
}

// Size of the trailing by_size array differs between mstats and MemStats,
// and all data after by_size is local to runtime, not exported.
// NumSizeClasses was changed, but we cannot change MemStats because of
// backward compatibility.
// sizeof_C_MStats is the size of the prefix of mstats that
// corresponds to MemStats. It should match Sizeof(MemStats{}).
var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])

func init() {
	var memStats MemStats
	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
		throw("MStats vs MemStatsType size mismatch")
	}

	if unsafe.Offsetof(memstats.heap_live)%8 != 0 {
		println(unsafe.Offsetof(memstats.heap_live))
		throw("memstats.heap_live not aligned to 8 bytes")
	}
}

// ReadMemStats populates m with memory allocator statistics.
//
// The returned memory allocator statistics are up to date as of the
// call to ReadMemStats. This is in contrast with a heap profile,
// which is a snapshot as of the most recently completed garbage
// collection cycle.
func ReadMemStats(m *MemStats) {
	stopTheWorld("read mem stats")

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld()
}

func readmemstats_m(stats *MemStats) {
	updatememstats(nil)

	// The size of the trailing by_size array differs between
	// mstats and MemStats.
	// NumSizeClasses was changed, but we
	// cannot change MemStats because of backward compatibility.
	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)

	// Stack numbers are part of the heap numbers; separate those out
	// for user consumption.
	stats.StackSys += stats.StackInuse
	stats.HeapInuse -= stats.StackInuse
	stats.HeapSys -= stats.StackInuse
}

//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}

func readGCStats_m(pauses *[]uint64) {
	p := *pauses
	// Calling code in runtime/debug should make the slice large enough.
	if cap(p) < len(memstats.pause_ns)+3 {
		throw("short slice passed to readGCStats")
	}

	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	lock(&mheap_.lock)

	n := memstats.numgc
	if n > uint32(len(memstats.pause_ns)) {
		n = uint32(len(memstats.pause_ns))
	}

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%len(pause_ns)], and then backward
	// from there to go back farther in time. We deliver the times
	// most recent first (in p[0]).
	p = p[:cap(p)]
	for i := uint32(0); i < n; i++ {
		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
		p[i] = memstats.pause_ns[j]
		p[n+i] = memstats.pause_end[j]
	}

	p[n+n] = memstats.last_gc
	p[n+n+1] = uint64(memstats.numgc)
	p[n+n+2] = memstats.pause_total_ns
	unlock(&mheap_.lock)
	*pauses = p[:n+n+3]
}

//go:nowritebarrier
func updatememstats(stats *gcstats) {
	if stats != nil {
		*stats = gcstats{}
	}
	for mp := allm; mp != nil; mp = mp.alllink {
		if stats != nil {
			src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
			dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
			for i, v := range src {
				dst[i] += v
			}
			mp.gcstats = gcstats{}
		}
	}

	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys

	// Calculate memory allocator stats.
	// During program execution we only count the number of frees and the
	// amount of freed memory. The current number of alive objects in the
	// heap and the amount of alive heap memory are calculated by scanning
	// all spans. The total number of mallocs is calculated as the number
	// of frees plus the number of alive objects. Similarly, the total
	// amount of allocated memory is calculated as the amount of freed
	// memory plus the amount of alive heap memory.
	memstats.alloc = 0
	memstats.total_alloc = 0
	memstats.nmalloc = 0
	memstats.nfree = 0
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.by_size[i].nmalloc = 0
		memstats.by_size[i].nfree = 0
	}

	// Flush mcaches to mcentral.
	systemstack(flushallmcaches)

	// Aggregate local stats.
	cachestats()

	// Scan all spans and count the number of alive objects.
	lock(&mheap_.lock)
	for _, s := range mheap_.allspans {
		if s.state != mSpanInUse {
			continue
		}
		if s.sizeclass == 0 {
			memstats.nmalloc++
			memstats.alloc += uint64(s.elemsize)
		} else {
			memstats.nmalloc += uint64(s.allocCount)
			memstats.by_size[s.sizeclass].nmalloc += uint64(s.allocCount)
			memstats.alloc += uint64(s.allocCount) * uint64(s.elemsize)
		}
	}
	unlock(&mheap_.lock)

	// Aggregate by size class.
	smallfree := uint64(0)
	memstats.nfree = mheap_.nlargefree
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.nfree += mheap_.nsmallfree[i]
		memstats.by_size[i].nfree = mheap_.nsmallfree[i]
		memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
		smallfree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
	}
	memstats.nfree += memstats.tinyallocs
	memstats.nmalloc += memstats.nfree

	// Calculate derived stats.
	memstats.total_alloc = memstats.alloc + mheap_.largefree + smallfree
	memstats.heap_alloc = memstats.alloc
	memstats.heap_objects = memstats.nmalloc - memstats.nfree
}

//go:nowritebarrier
func cachestats() {
	for i := 0; ; i++ {
		p := allp[i]
		if p == nil {
			break
		}
		c := p.mcache
		if c == nil {
			continue
		}
		purgecachedstats(c)
	}
}

// flushmcache flushes the mcache of allp[i].
//
// The world must be stopped.
//
//go:nowritebarrier
func flushmcache(i int) {
	p := allp[i]
	if p == nil {
		return
	}
	c := p.mcache
	if c == nil {
		return
	}
	c.releaseAll()
	stackcache_clear(c)
}

// flushallmcaches flushes the mcaches of all Ps.
//
// The world must be stopped.
//
//go:nowritebarrier
func flushallmcaches() {
	for i := 0; i < int(gomaxprocs); i++ {
		flushmcache(i)
	}
}

//go:nosplit
func purgecachedstats(c *mcache) {
	// Protected by either heap or GC lock.
	h := &mheap_
	memstats.heap_scan += uint64(c.local_scan)
	c.local_scan = 0
	memstats.tinyallocs += uint64(c.local_tinyallocs)
	c.local_tinyallocs = 0
	memstats.nlookup += uint64(c.local_nlookup)
	c.local_nlookup = 0
	h.largefree += uint64(c.local_largefree)
	c.local_largefree = 0
	h.nlargefree += uint64(c.local_nlargefree)
	c.local_nlargefree = 0
	for i := 0; i < len(c.local_nsmallfree); i++ {
		h.nsmallfree[i] += uint64(c.local_nsmallfree[i])
		c.local_nsmallfree[i] = 0
	}
}

// Atomically increases a given *system* memory stat. We are counting on this
// stat never overflowing a uintptr, so this function must only be used for
// system memory stats.
//
// The current implementation for little endian architectures is based on
// xadduintptr(), which is less than ideal: xadd64() should really be used.
// Using xadduintptr() is a stop-gap solution until arm supports xadd64() that
// doesn't use locks. (Locks are a problem as they require a valid G, which
// restricts their usability.)
//
// A side-effect of using xadduintptr() is that we need to check for
// overflow errors.
//go:nosplit
func mSysStatInc(sysStat *uint64, n uintptr) {
	if sys.BigEndian != 0 {
		atomic.Xadd64(sysStat, int64(n))
		return
	}
	if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), n); val < n {
		print("runtime: stat overflow: val ", val, ", n ", n, "\n")
		exit(2)
	}
}

// Atomically decreases a given *system* memory stat. Same comments as
Same comments as 660 // mSysStatInc apply. 661 //go:nosplit 662 func mSysStatDec(sysStat *uint64, n uintptr) { 663 if sys.BigEndian != 0 { 664 atomic.Xadd64(sysStat, -int64(n)) 665 return 666 } 667 if val := atomic.Xadduintptr((*uintptr)(unsafe.Pointer(sysStat)), uintptr(-int64(n))); val+n < n { 668 print("runtime: stat underflow: val ", val, ", n ", n, "\n") 669 exit(2) 670 } 671 }