rsc.io/go@v0.0.0-20150416155037-e040fd465409/src/runtime/trace.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events such as goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a
// buffer in a compact form. A nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See http://golang.org/s/go15trace for more info.

package runtime

import "unsafe"

// Event types in the trace; args are given in square brackets.
const (
	traceEvNone           = 0  // unused
	traceEvBatch          = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency      = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack          = 3  // stack [stack id, number of PCs, array of PCs]
	traceEvGomaxprocs     = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart      = 5  // start of P [timestamp, thread id]
	traceEvProcStop       = 6  // stop of P [timestamp]
	traceEvGCStart        = 7  // GC start [timestamp, stack id]
	traceEvGCDone         = 8  // GC done [timestamp]
	traceEvGCScanStart    = 9  // GC scan start [timestamp]
	traceEvGCScanDone     = 10 // GC scan done [timestamp]
	traceEvGCSweepStart   = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone    = 12 // GC sweep done [timestamp]
	traceEvGoCreate       = 13 // goroutine creation [timestamp, new goroutine id, start PC, stack id]
	traceEvGoStart        = 14 // goroutine starts running [timestamp, goroutine id]
	traceEvGoEnd          = 15 // goroutine ends [timestamp]
	traceEvGoStop         = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched        = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt      = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep        = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock        = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock      = 21 // goroutine is unblocked [timestamp, goroutine id, stack]
	traceEvGoBlockSend    = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv    = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect  = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync    = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond    = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet     = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall      = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit      = 29 // syscall exit [timestamp, goroutine id, real timestamp]
	traceEvGoSysBlock     = 30 // syscall blocks [timestamp]
	traceEvGoWaiting      = 31 // denotes that goroutine is blocked when tracing starts [goroutine id]
	traceEvGoInSyscall    = 32 // denotes that goroutine is in syscall when tracing starts [goroutine id]
	traceEvHeapAlloc      = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC         = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup   = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvCount          = 37
)
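// Editorial sketch (not in the original source): on the wire each event is a
// single byte holding the type and argument count, followed by the arguments
// as base-128 varints (see traceAppend below). For example, a batch header
// traceEvBatch [pid, timestamp] for P 2 starting at tick 1000 encodes as:
//
//	0x41       traceEvBatch | 1<<traceArgCountShift
//	0x02       pid = 2
//	0xe8 0x07  timestamp = 1000 (little-endian base-128)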
const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	traceTickDiv = 64
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)

// trace is the global tracing context.
var trace struct {
	lock          mutex  // protects the following members
	lockOwner     *g     // to avoid deadlocks during recursive lock acquisition
	enabled       bool   // when set, the runtime traces events
	shutdown      bool   // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool   // whether ReadTrace has emitted trace header
	footerWritten bool   // whether ReadTrace has emitted trace footer
	shutdownSema  uint32 // used to wait for ReadTrace completion
	ticksStart    int64  // cputicks when tracing was started
	ticksEnd      int64  // cputicks when tracing was stopped
	timeStart     int64  // nanotime when tracing was started
	timeEnd       int64  // nanotime when tracing was stopped
	reading       *traceBuf // buffer currently handed off to user
	empty         *traceBuf // stack of empty buffers
	fullHead      *traceBuf // queue of full buffers
	fullTail      *traceBuf
	reader        *g              // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	bufLock mutex     // protects buf
	buf     *traceBuf // global trace buffer, used when running without a p
}

// traceBufHeader is the header of a per-P tracing buffer.
type traceBufHeader struct {
	link      *traceBuf               // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	buf       []byte                  // trace data, always points to traceBuf.arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is a per-P tracing buffer.
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}
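// Editorial sketch (not in the original source): the arr sizing above is
// chosen so that a whole traceBuf is exactly 64 KiB. A compile-time
// assertion of that invariant could look like this; it fails to compile if
// the arithmetic ever drifts:
var _ = [1]struct{}{}[unsafe.Sizeof(traceBuf{})-64<<10]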
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/pprof package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	semacquire(&worldsema, false)
	_g_ := getg()
	_g_.m.preemptoff = "start tracing"
	systemstack(stoptheworld)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such syscalls will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		_g_.m.preemptoff = ""
		semrelease(&worldsema)
		systemstack(starttheworld)
		return errorString("tracing is already enabled")
	}

	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.enabled = true
	trace.headerWritten = false
	trace.footerWritten = false

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			traceGoCreate(gp, gp.startpc)
		}
		if status == _Gwaiting {
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		}
	}
	traceProcStart()
	traceGoStart()

	unlock(&trace.bufLock)

	_g_.m.preemptoff = ""
	semrelease(&worldsema)
	systemstack(starttheworld)
	return nil
}
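// Editorial sketch (not in the original source): a minimal client of this
// API, assuming a hypothetical io.Writer w. ReadTrace must be drained from a
// single goroutine, and each returned slice must be consumed before the next
// call:
//
//	if err := runtime.StartTrace(); err != nil {
//		return err
//	}
//	go func() {
//		for {
//			data := runtime.ReadTrace()
//			if data == nil {
//				break // tracing stopped and all buffers drained
//			}
//			w.Write(data) // copy out before calling ReadTrace again
//		}
//	}()
//	// ... run the workload of interest ...
//	runtime.StopTrace()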
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	semacquire(&worldsema, false)
	_g_ := getg()
	_g_.m.preemptoff = "stop tracing"
	systemstack(stoptheworld)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		_g_.m.preemptoff = ""
		semrelease(&worldsema)
		systemstack(starttheworld)
		return
	}

	traceGoSched()

	for _, p := range &allp {
		if p == nil {
			break
		}
		buf := p.tracebuf
		if buf != nil {
			traceFullQueue(buf)
			p.tracebuf = nil
		}
	}
	if trace.buf != nil && len(trace.buf.buf) != 0 {
		buf := trace.buf
		trace.buf = nil
		traceFullQueue(buf)
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms; wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	trace.stackTab.dump()

	unlock(&trace.bufLock)

	_g_.m.preemptoff = ""
	semrelease(&worldsema)
	systemstack(starttheworld)

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema, false)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range &allp {
		if p == nil {
			break
		}
		if p.tracebuf != nil {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != nil {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != nil || trace.fullTail != nil {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != nil || trace.reader != nil {
		throw("trace: reading after shutdown")
	}
	for trace.empty != nil {
		buf := trace.empty
		trace.empty = buf.link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf), &memstats.other_sys)
	}
	trace.shutdown = false
	unlock(&trace.lock)
}
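// Editorial note (not in the original source): the byte stream handed out by
// ReadTrace below is, in order:
//
//	1. the "gotrace\x00" header
//	2. the contents of full per-P buffers, each beginning with a
//	   traceEvBatch event carrying the P id and an absolute timestamp
//	3. a footer with traceEvFrequency (and traceEvTimerGoroutine, if any)
//	4. nil, once tracing is stopped and everything has been drained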
// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != nil {
		// More than one goroutine reads trace. This is bad.
		// But we would rather not crash the program because of tracing,
		// because tracing can be enabled at runtime on production servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != nil {
		buf.link = trace.empty
		trace.empty = buf
		trace.reading = nil
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("gotrace\x00")
	}
	// Wait for new data.
	if trace.fullHead == nil && !trace.shutdown {
		trace.reader = getg()
		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != nil {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.buf
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		if timers.gp != nil {
			data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
			data = traceAppend(data, uint64(timers.gp.goid))
		}
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which the race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to pprof.StartTrace.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == nil || (trace.fullHead == nil && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == nil || (trace.fullHead == nil && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader
	trace.reader = nil
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees the trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = nil
	if buf == nil {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into the queue of full buffers.
func traceFullQueue(buf *traceBuf) {
	buf.link = nil
	if trace.fullHead == nil {
		trace.fullHead = buf
	} else {
		trace.fullTail.link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from the queue of full buffers.
func traceFullDequeue() *traceBuf {
	buf := trace.fullHead
	if buf == nil {
		return nil
	}
	trace.fullHead = buf.link
	if trace.fullHead == nil {
		trace.fullTail = nil
	}
	buf.link = nil
	return buf
}
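// Editorial sketch (not in the original source): the layout produced by
// traceEvent below. The argument count stored in the top two bits of the
// first byte does not include the timestamp diff, and saturates at 3, at
// which point an explicit payload length byte is emitted instead:
//
//	byte 0:  ev | narg<<traceArgCountShift
//	byte 1:  payload length in bytes (only when narg == 3; assumed < 128)
//	then:    varint timestamp diff, varint args..., varint stack id (if any)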
// traceEvent writes a single event to the trace buffer, flushing the buffer if necessary.
// ev is the event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stoptheworld, and stoptheworld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stoptheworld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
	if !trace.enabled {
		traceReleaseBuffer(pid)
		return
	}
	buf := *bufp
	const maxSize = 2 + 4*traceBytesPerNumber // event type, length, timestamp, stack id and two additional params
	if buf == nil || cap(buf.buf)-len(buf.buf) < maxSize {
		buf = traceFlush(buf)
		*bufp = buf
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	if len(buf.buf) == 0 {
		data := buf.buf
		data = append(data, traceEvBatch|1<<traceArgCountShift)
		data = traceAppend(data, uint64(pid))
		data = traceAppend(data, ticks)
		buf.buf = data
		tickDiff = 0
	}
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for the number of arguments.
	// If the number is >= 3, then the event type is followed by the event length in bytes.
	if narg > 3 {
		narg = 3
	}
	data := buf.buf
	data = append(data, ev|narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		data = append(data, 0)
		lenp = &data[len(data)-1]
	}
	data = traceAppend(data, tickDiff)
	for _, a := range args {
		data = traceAppend(data, a)
	}
	if skip == 0 {
		data = append(data, 0)
	} else if skip > 0 {
		_g_ := getg()
		gp := mp.curg
		var nstk int
		if gp == _g_ {
			nstk = callers(skip, buf.stk[:])
		} else if gp != nil {
			nstk = gcallers(gp, skip, buf.stk[:])
		}
		if nstk > 0 {
			nstk-- // skip runtime.goexit
		}
		if nstk > 0 && gp.goid == 1 {
			nstk-- // skip runtime.main
		}
		id := trace.stackTab.put(buf.stk[:nstk])
		data = traceAppend(data, uint64(id))
	}
	evSize := len(data) - len(buf.buf)
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in the actual length.
		*lenp = byte(evSize - 2)
	}
	buf.buf = data
	traceReleaseBuffer(pid)
}

// traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp **traceBuf) {
	mp = acquirem()
	if p := mp.p; p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto the queue of full buffers and returns an empty buffer.
func traceFlush(buf *traceBuf) *traceBuf {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != nil {
		if &buf.buf[0] != &buf.arr[0] {
			throw("trace buffer overflow")
		}
		traceFullQueue(buf)
	}
	if trace.empty != nil {
		buf = trace.empty
		trace.empty = buf.link
	} else {
		buf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == nil {
			throw("trace: out of memory")
		}
	}
	buf.link = nil
	buf.buf = buf.arr[:0]
	buf.lastTicks = 0
	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceAppend appends v to buf in little-endian base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
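// traceReadNumber is an editorial sketch (not part of the original source,
// and unused by the runtime) of the inverse of traceAppend: it decodes one
// little-endian base-128 number from buf, returning the value and the number
// of bytes consumed (0 if buf ends mid-number).
func traceReadNumber(buf []byte) (v uint64, n int) {
	for shift := uint(0); n < len(buf); shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift // low 7 bits carry the payload
		if b < 0x80 {                // high bit clear: last byte of the number
			return v, n
		}
	}
	return 0, 0
}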
// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]*traceStack
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link *traceStack
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

// stack returns the slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), uintptr(len(pcs))*unsafe.Sizeof(pcs[0]), 0)
	// First, search the hashtable without the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part]; stk != nil; stk = stk.link {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*ptrSize))
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + traceStackSize) * traceBytesPerNumber]byte
	buf := traceFlush(nil)
	for _, stk := range tab.tab {
		for ; stk != nil; stk = stk.link {
			maxSize := 1 + (3+stk.n)*traceBytesPerNumber
			if cap(buf.buf)-len(buf.buf) < maxSize {
				buf = traceFlush(buf)
			}
			// Form the event in the temp buffer; we need to know the actual length.
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			tmpbuf = traceAppend(tmpbuf, uint64(stk.n))
			for _, pc := range stk.stack() {
				tmpbuf = traceAppend(tmpbuf, uint64(pc))
			}
			// Now copy to the buffer.
			data := buf.buf
			data = append(data, traceEvStack|3<<traceArgCountShift)
			data = traceAppend(data, uint64(len(tmpbuf)))
			data = append(data, tmpbuf...)
			buf.buf = data
		}
	}

	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head *traceAllocBlock
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
type traceAllocBlock struct {
	next *traceAllocBlock
	data [64<<10 - ptrSize]byte
}
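// Editorial note (not in the original source) on the bounds check in alloc
// below: len(a.head.data) is a compile-time constant because data is an
// array field, so the expression never dereferences a.head and is safe to
// evaluate even while a.head is still nil.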
// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, ptrSize)
	if a.head == nil || a.off+n > uintptr(len(a.head.data)) {
		if n > uintptr(len(a.head.data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next = a.head
		a.head = block
		a.off = 0
	}
	p := &a.head.data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != nil {
		block := a.head
		a.head = block.next
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to the trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stoptheworld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p = pp
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 4)
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCScanStart() {
	traceEvent(traceEvGCScanStart, -1)
}

func traceGCScanDone() {
	traceEvent(traceEvGCScanDone, -1)
}

func traceGCSweepStart() {
	traceEvent(traceEvGCSweepStart, 1)
}

func traceGCSweepDone() {
	traceEvent(traceEvGCSweepDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(pc))
}

func traceGoStart() {
	traceEvent(traceEvGoStart, -1, uint64(getg().m.curg.goid))
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int, gp *g) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	traceEvent(traceEvGoUnblock, skip, uint64(gp.goid))
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 4)
}

func traceGoSysExit(ts int64) {
	traceEvent(traceEvGoSysExit, -1, uint64(getg().m.curg.goid), uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stoptheworld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p = pp
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	traceEvent(traceEvNextGC, -1, memstats.next_gc)
}
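// traceDecodeEvent is an editorial sketch (not part of the original source,
// and unused by the runtime) of how a consumer would split one event off a
// batch body, using the traceReadNumber sketch above. For events with fewer
// than 3 encoded arguments the payload is narg+1 varints, the first being
// the timestamp diff (header/footer events reuse that slot); for narg == 3
// the payload is length-prefixed instead.
func traceDecodeEvent(buf []byte) (ev byte, args []uint64, rest []byte) {
	ev = buf[0] & (1<<traceArgCountShift - 1) // low 6 bits: event type
	narg := buf[0] >> traceArgCountShift      // top 2 bits: argument count
	buf = buf[1:]
	if narg == 3 { // "3 or more" args: explicit payload length follows
		length := int(buf[0])
		payload := buf[1 : 1+length]
		rest = buf[1+length:]
		for len(payload) > 0 {
			v, n := traceReadNumber(payload)
			if n == 0 {
				break // malformed input
			}
			args = append(args, v)
			payload = payload[n:]
		}
		return ev, args, rest
	}
	for i := byte(0); i <= narg; i++ { // narg+1 varints, timestamp diff first
		v, n := traceReadNumber(buf)
		if n == 0 {
			break // malformed input
		}
		args = append(args, v)
		buf = buf[n:]
	}
	return ev, args, buf
}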