github.com/m10x/go/src@v0.0.0-20220112094212-ba61592315da/runtime/trace.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A nanosecond-precision timestamp and a stack trace are
// captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // gcController.heapLive change [timestamp, heap_alloc]
	traceEvHeapGoal          = 34 // gcController.heapGoal (formerly next_gc) change [timestamp, heap goal in bytes]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewContext [timestamp, internal task id, internal parent task id, stack, name string]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// A byte is used for the event type, but only 6 bits of it are available.
	// The remaining 2 bits are used to specify the number of arguments.
	// That means the max event type value is 63.
)

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)
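// traceEvHeaderExample is an illustrative sketch (it is not used anywhere in
// the runtime) of how the first byte of an event combines the event type and
// the inline argument count described in the two const blocks above: the low
// 6 bits hold the type and the top 2 bits hold narg, where 3 means "an
// explicit length byte follows".
func traceEvHeaderExample() (ev, narg byte) {
	header := byte(traceEvGoStart) | 2<<traceArgCountShift // type 14 with 2 args
	ev = header &^ (3 << traceArgCountShift)               // low 6 bits: the event type
	narg = header >> traceArgCountShift                    // top 2 bits: the argument count
	return ev, narg
}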
// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	// option: pre-assign ids to all user annotation region names and tags
	// option: per-P cache
	// option: sync.Map like data structure
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is the header of a per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is a per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying event buffer
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// Use stopTheWorldGC so the world is not stopped in the middle of a GC;
	// this ensures we always see a consistent view of GC-related events
	// (e.g. a start is always paired with an end).
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	// World is stopped, no need to lock.
	forEachGRace(func(gp *g) {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	})
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before the traceEvGoInSyscall timestamp.
	// That would lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()
	return nil
}
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call the heap allocator, which will try to emit a trace
	// event while holding the heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we would rather not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		if freq <= 0 {
			throw("trace: ReadTrace got invalid frequency")
		}
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// This will emit a bunch of full buffers; we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which the race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
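// For reference, a consumer of ReadTrace is a simple pump loop. The following
// is a sketch of roughly what runtime/trace.Start arranges (w is a
// hypothetical io.Writer supplied by the caller):
//
//	go func() {
//		for {
//			data := runtime.ReadTrace()
//			if data == nil {
//				// Tracing was stopped and all buffered data was returned.
//				break
//			}
//			// data is only valid until the next ReadTrace call,
//			// so it must be written out (copied) immediately.
//			w.Write(data)
//		}
//	}()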
// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees the trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into the queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from the queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}
// traceEvent writes a single event to the trace buffer, flushing the buffer if necessary.
// ev is the event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}

func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two additional params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	// NOTE: ticks might be the same after tick division, even though the real
	// cputicks grows monotonically.
	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	if tickDiff == 0 {
		ticks = buf.lastTicks + 1
		tickDiff = 1
	}

	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for the number of arguments.
	// If the number is >= 3, then the event type is followed by the event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
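// Putting traceEventLocked together, the wire format of a single event is
// (each number in the LEB128 varint encoding written by buf.varint):
//
//	[ev | narg<<traceArgCountShift] [tickDiff] [args...] [stack id, if any]
//
// and, when narg is capped at 3, a length byte is spliced in after the
// header so that decoders can skip the variable-size payload:
//
//	[ev | 3<<traceArgCountShift] [payload length in bytes] [tickDiff] [args...] [stack id, if any]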
func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		nstk = gcallers(gp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto the stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	if ticks == bufp.lastTicks {
		ticks = bufp.lastTicks + 1
	}
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// Memory allocation above may trigger tracing and
	// cause *bufp changes. The following code now works with *bufp,
	// so there must be no memory allocation or any activities
	// that cause tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// Double-check that the string and its length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}
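// A string dictionary entry as emitted by traceString above is therefore:
//
//	[traceEvString] [string id] [length] [utf-8 bytes...]
//
// Unlike regular events it carries no timestamp; readers resolve the
// string-id arguments of later events against this table.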
// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
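// traceDecodeVarint is an illustrative inverse of traceAppend/varint (a
// sketch; nothing in the runtime calls it): each byte contributes 7 low
// bits, and a set 0x80 bit means another byte follows. It returns the
// decoded value and the number of bytes consumed (0 if buf is malformed).
func traceDecodeVarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); n < len(buf); shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b&0x80 == 0 {
			return v, n
		}
	}
	return 0, 0 // ran out of bytes mid-number
}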
// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns the slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}

// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets the state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)
}
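// Matching the traceEvStack description in the event-type table, each record
// that dump emits is laid out as:
//
//	[traceEvStack | 3<<traceArgCountShift] [payload length] [stack id]
//	[number of frames] [{PC, func string id, file string id, line}...]
//
// The function and file names themselves travel separately as traceEvString
// entries, emitted by traceFrameForPC below.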
type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - goarch.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, goarch.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to the trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}
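// The sweep functions above form a small protocol: Start arms the P, Span
// lazily emits GCSweepStart on the first page, and Done emits GCSweepDone
// only if anything was actually swept. A sketch of how a sweeper is expected
// to call them:
//
//	traceGCSweepStart()
//	for haveSpanToSweep() { // hypothetical loop condition
//		traceGCSweepSpan(bytesSwept)
//	}
//	traceGCSweepDone() // no events at all if nothing was swept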
func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh timestamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, gcController.heapLive)
}

func traceHeapGoal() {
	if heapGoal := atomic.Load64(&gcController.heapGoal); heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvHeapGoal, -1, 0)
	} else {
		traceEvent(traceEvHeapGoal, -1, heapGoal)
	}
}
// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for val and len(val)
	// in buf, so buf now has room for the following.
	buf := bufp.ptr()

	// Double-check that the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
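// For reference, these linknamed hooks are reached from the public
// runtime/trace API roughly as follows (a usage sketch, not runtime code;
// parseFn is a hypothetical func()):
//
//	ctx, task := trace.NewTask(ctx, "request") // -> trace_userTaskCreate
//	defer task.End()                           // -> trace_userTaskEnd
//	trace.WithRegion(ctx, "parse", parseFn)    // -> trace_userRegion (start and end)
//	trace.Log(ctx, "key", "value")             // -> trace_userLog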