// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pprof

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"runtime"
	"strconv"
	"time"
	"unsafe"
)

// lostProfileEvent is the function to which lost profiling
// events are attributed.
// (The name shows up in the pprof graphs.)
func lostProfileEvent() { lostProfileEvent() }

// funcPC returns the PC for the func value f.
//
// It reinterprets the interface value's two-word header; the second
// word is the data pointer, here a pointer to the funcval whose first
// word is the entry PC, which the final dereference reads.
// NOTE(review): this depends on the runtime's interface/func layout
// and is only valid inside this runtime — do not copy elsewhere.
func funcPC(f interface{}) uintptr {
	return *(*[2]*uintptr)(unsafe.Pointer(&f))[1]
}

// A profileBuilder writes a profile incrementally from a
// stream of profile samples delivered by the runtime.
type profileBuilder struct {
	start      time.Time // when profiling started (set in newProfileBuilder)
	end        time.Time // when profiling ended (set in build)
	havePeriod bool      // whether the leading period record has been consumed
	period     int64     // sampling period in nanoseconds
	m          profMap   // deduplicates samples keyed by (stack, tag)

	// encoding state
	w         io.Writer           // final destination for the gzipped profile
	zw        *gzip.Writer        // gzip stream wrapping w
	pb        protobuf            // protobuf encoding buffer
	strings   []string            // string table; index 0 is always ""
	stringMap map[string]int      // string -> index into strings
	locs      map[uintptr]locInfo // list of locInfo starting with the given PC.
	funcs     map[string]int      // Package path-qualified function name to Function.ID
	mem       []memMap            // executable mappings, in /proc/self/maps order
	deck      pcDeck              // pending inlined-call accumulation (see pcDeck)
}

// memMap records one executable mapping for the profile's Mapping table.
type memMap struct {
	// initialized as reading mapping
	start         uintptr
	end           uintptr
	offset        uint64
	file, buildID string

	funcs symbolizeFlag
	fake  bool // map entry was faked; /proc/self/maps wasn't available
}

// symbolizeFlag keeps track of symbolization result.
62 // 0 : no symbol lookup was performed 63 // 1<<0 (lookupTried) : symbol lookup was performed 64 // 1<<1 (lookupFailed): symbol lookup was performed but failed 65 type symbolizeFlag uint8 66 67 const ( 68 lookupTried symbolizeFlag = 1 << iota 69 lookupFailed symbolizeFlag = 1 << iota 70 ) 71 72 const ( 73 // message Profile 74 tagProfile_SampleType = 1 // repeated ValueType 75 tagProfile_Sample = 2 // repeated Sample 76 tagProfile_Mapping = 3 // repeated Mapping 77 tagProfile_Location = 4 // repeated Location 78 tagProfile_Function = 5 // repeated Function 79 tagProfile_StringTable = 6 // repeated string 80 tagProfile_DropFrames = 7 // int64 (string table index) 81 tagProfile_KeepFrames = 8 // int64 (string table index) 82 tagProfile_TimeNanos = 9 // int64 83 tagProfile_DurationNanos = 10 // int64 84 tagProfile_PeriodType = 11 // ValueType (really optional string???) 85 tagProfile_Period = 12 // int64 86 tagProfile_Comment = 13 // repeated int64 87 tagProfile_DefaultSampleType = 14 // int64 88 89 // message ValueType 90 tagValueType_Type = 1 // int64 (string table index) 91 tagValueType_Unit = 2 // int64 (string table index) 92 93 // message Sample 94 tagSample_Location = 1 // repeated uint64 95 tagSample_Value = 2 // repeated int64 96 tagSample_Label = 3 // repeated Label 97 98 // message Label 99 tagLabel_Key = 1 // int64 (string table index) 100 tagLabel_Str = 2 // int64 (string table index) 101 tagLabel_Num = 3 // int64 102 103 // message Mapping 104 tagMapping_ID = 1 // uint64 105 tagMapping_Start = 2 // uint64 106 tagMapping_Limit = 3 // uint64 107 tagMapping_Offset = 4 // uint64 108 tagMapping_Filename = 5 // int64 (string table index) 109 tagMapping_BuildID = 6 // int64 (string table index) 110 tagMapping_HasFunctions = 7 // bool 111 tagMapping_HasFilenames = 8 // bool 112 tagMapping_HasLineNumbers = 9 // bool 113 tagMapping_HasInlineFrames = 10 // bool 114 115 // message Location 116 tagLocation_ID = 1 // uint64 117 tagLocation_MappingID = 2 // uint64 
118 tagLocation_Address = 3 // uint64 119 tagLocation_Line = 4 // repeated Line 120 121 // message Line 122 tagLine_FunctionID = 1 // uint64 123 tagLine_Line = 2 // int64 124 125 // message Function 126 tagFunction_ID = 1 // uint64 127 tagFunction_Name = 2 // int64 (string table index) 128 tagFunction_SystemName = 3 // int64 (string table index) 129 tagFunction_Filename = 4 // int64 (string table index) 130 tagFunction_StartLine = 5 // int64 131 ) 132 133 // stringIndex adds s to the string table if not already present 134 // and returns the index of s in the string table. 135 func (b *profileBuilder) stringIndex(s string) int64 { 136 id, ok := b.stringMap[s] 137 if !ok { 138 id = len(b.strings) 139 b.strings = append(b.strings, s) 140 b.stringMap[s] = id 141 } 142 return int64(id) 143 } 144 145 func (b *profileBuilder) flush() { 146 const dataFlush = 4096 147 if b.pb.nest == 0 && len(b.pb.data) > dataFlush { 148 b.zw.Write(b.pb.data) 149 b.pb.data = b.pb.data[:0] 150 } 151 } 152 153 // pbValueType encodes a ValueType message to b.pb. 154 func (b *profileBuilder) pbValueType(tag int, typ, unit string) { 155 start := b.pb.startMessage() 156 b.pb.int64(tagValueType_Type, b.stringIndex(typ)) 157 b.pb.int64(tagValueType_Unit, b.stringIndex(unit)) 158 b.pb.endMessage(tag, start) 159 } 160 161 // pbSample encodes a Sample message to b.pb. 162 func (b *profileBuilder) pbSample(values []int64, locs []uint64, labels func()) { 163 start := b.pb.startMessage() 164 b.pb.int64s(tagSample_Value, values) 165 b.pb.uint64s(tagSample_Location, locs) 166 if labels != nil { 167 labels() 168 } 169 b.pb.endMessage(tagProfile_Sample, start) 170 b.flush() 171 } 172 173 // pbLabel encodes a Label message to b.pb. 
174 func (b *profileBuilder) pbLabel(tag int, key, str string, num int64) { 175 start := b.pb.startMessage() 176 b.pb.int64Opt(tagLabel_Key, b.stringIndex(key)) 177 b.pb.int64Opt(tagLabel_Str, b.stringIndex(str)) 178 b.pb.int64Opt(tagLabel_Num, num) 179 b.pb.endMessage(tag, start) 180 } 181 182 // pbLine encodes a Line message to b.pb. 183 func (b *profileBuilder) pbLine(tag int, funcID uint64, line int64) { 184 start := b.pb.startMessage() 185 b.pb.uint64Opt(tagLine_FunctionID, funcID) 186 b.pb.int64Opt(tagLine_Line, line) 187 b.pb.endMessage(tag, start) 188 } 189 190 // pbMapping encodes a Mapping message to b.pb. 191 func (b *profileBuilder) pbMapping(tag int, id, base, limit, offset uint64, file, buildID string, hasFuncs bool) { 192 start := b.pb.startMessage() 193 b.pb.uint64Opt(tagMapping_ID, id) 194 b.pb.uint64Opt(tagMapping_Start, base) 195 b.pb.uint64Opt(tagMapping_Limit, limit) 196 b.pb.uint64Opt(tagMapping_Offset, offset) 197 b.pb.int64Opt(tagMapping_Filename, b.stringIndex(file)) 198 b.pb.int64Opt(tagMapping_BuildID, b.stringIndex(buildID)) 199 // TODO: we set HasFunctions if all symbols from samples were symbolized (hasFuncs). 200 // Decide what to do about HasInlineFrames and HasLineNumbers. 201 // Also, another approach to handle the mapping entry with 202 // incomplete symbolization results is to dupliace the mapping 203 // entry (but with different Has* fields values) and use 204 // different entries for symbolized locations and unsymbolized locations. 205 if hasFuncs { 206 b.pb.bool(tagMapping_HasFunctions, true) 207 } 208 b.pb.endMessage(tag, start) 209 } 210 211 func allFrames(addr uintptr) ([]runtime.Frame, symbolizeFlag) { 212 // Expand this one address using CallersFrames so we can cache 213 // each expansion. In general, CallersFrames takes a whole 214 // stack, but in this case we know there will be no skips in 215 // the stack and we have return PCs anyway. 
216 frames := runtime.CallersFrames([]uintptr{addr}) 217 frame, more := frames.Next() 218 if frame.Function == "runtime.goexit" { 219 // Short-circuit if we see runtime.goexit so the loop 220 // below doesn't allocate a useless empty location. 221 return nil, 0 222 } 223 224 symbolizeResult := lookupTried 225 if frame.PC == 0 || frame.Function == "" || frame.File == "" || frame.Line == 0 { 226 symbolizeResult |= lookupFailed 227 } 228 229 if frame.PC == 0 { 230 // If we failed to resolve the frame, at least make up 231 // a reasonable call PC. This mostly happens in tests. 232 frame.PC = addr - 1 233 } 234 ret := []runtime.Frame{frame} 235 for frame.Function != "runtime.goexit" && more == true { 236 frame, more = frames.Next() 237 ret = append(ret, frame) 238 } 239 return ret, symbolizeResult 240 } 241 242 type locInfo struct { 243 // location id assigned by the profileBuilder 244 id uint64 245 246 // sequence of PCs, including the fake PCs returned by the traceback 247 // to represent inlined functions 248 // https://github.com/golang/go/blob/d6f2f833c93a41ec1c68e49804b8387a06b131c5/src/runtime/traceback.go#L347-L368 249 pcs []uintptr 250 } 251 252 // newProfileBuilder returns a new profileBuilder. 253 // CPU profiling data obtained from the runtime can be added 254 // by calling b.addCPUData, and then the eventual profile 255 // can be obtained by calling b.finish. 256 func newProfileBuilder(w io.Writer) *profileBuilder { 257 zw, _ := gzip.NewWriterLevel(w, gzip.BestSpeed) 258 b := &profileBuilder{ 259 w: w, 260 zw: zw, 261 start: time.Now(), 262 strings: []string{""}, 263 stringMap: map[string]int{"": 0}, 264 locs: map[uintptr]locInfo{}, 265 funcs: map[string]int{}, 266 } 267 b.readMapping() 268 return b 269 } 270 271 // addCPUData adds the CPU profiling data to the profile. 272 // The data must be a whole number of records, 273 // as delivered by the runtime. 
274 func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error { 275 if !b.havePeriod { 276 // first record is period 277 if len(data) < 3 { 278 return fmt.Errorf("truncated profile") 279 } 280 if data[0] != 3 || data[2] == 0 { 281 return fmt.Errorf("malformed profile") 282 } 283 // data[2] is sampling rate in Hz. Convert to sampling 284 // period in nanoseconds. 285 b.period = 1e9 / int64(data[2]) 286 b.havePeriod = true 287 data = data[3:] 288 } 289 290 // Parse CPU samples from the profile. 291 // Each sample is 3+n uint64s: 292 // data[0] = 3+n 293 // data[1] = time stamp (ignored) 294 // data[2] = count 295 // data[3:3+n] = stack 296 // If the count is 0 and the stack has length 1, 297 // that's an overflow record inserted by the runtime 298 // to indicate that stack[0] samples were lost. 299 // Otherwise the count is usually 1, 300 // but in a few special cases like lost non-Go samples 301 // there can be larger counts. 302 // Because many samples with the same stack arrive, 303 // we want to deduplicate immediately, which we do 304 // using the b.m profMap. 305 for len(data) > 0 { 306 if len(data) < 3 || data[0] > uint64(len(data)) { 307 return fmt.Errorf("truncated profile") 308 } 309 if data[0] < 3 || tags != nil && len(tags) < 1 { 310 return fmt.Errorf("malformed profile") 311 } 312 count := data[2] 313 stk := data[3:data[0]] 314 data = data[data[0]:] 315 var tag unsafe.Pointer 316 if tags != nil { 317 tag = tags[0] 318 tags = tags[1:] 319 } 320 321 if count == 0 && len(stk) == 1 { 322 // overflow record 323 count = uint64(stk[0]) 324 stk = []uint64{ 325 uint64(funcPC(lostProfileEvent)), 326 } 327 } 328 b.m.lookup(stk, tag).count += int64(count) 329 } 330 return nil 331 } 332 333 // build completes and returns the constructed profile. 
// build writes the accumulated profile: header fields, every
// deduplicated sample, the mapping table, and finally the string
// table, then closes the gzip stream.
func (b *profileBuilder) build() {
	b.end = time.Now()

	b.pb.int64Opt(tagProfile_TimeNanos, b.start.UnixNano())
	if b.havePeriod { // must be CPU profile
		b.pbValueType(tagProfile_SampleType, "samples", "count")
		b.pbValueType(tagProfile_SampleType, "cpu", "nanoseconds")
		b.pb.int64Opt(tagProfile_DurationNanos, b.end.Sub(b.start).Nanoseconds())
		b.pbValueType(tagProfile_PeriodType, "cpu", "nanoseconds")
		b.pb.int64Opt(tagProfile_Period, b.period)
	}

	// values is reused across samples: [sample count, cpu nanoseconds].
	values := []int64{0, 0}
	var locs []uint64

	for e := b.m.all; e != nil; e = e.nextAll {
		values[0] = e.count
		values[1] = e.count * b.period

		var labels func()
		if e.tag != nil {
			// Emit the sample's labels from its tag, which holds a *labelMap.
			labels = func() {
				for k, v := range *(*labelMap)(e.tag) {
					b.pbLabel(tagSample_Label, k, v, 0)
				}
			}
		}

		// locs is reused (truncated, not reallocated) for each sample.
		locs = b.appendLocsForStack(locs[:0], e.stk)

		b.pbSample(values, locs, labels)
	}

	for i, m := range b.mem {
		hasFunctions := m.funcs == lookupTried // lookupTried but not lookupFailed
		b.pbMapping(tagProfile_Mapping, uint64(i+1), uint64(m.start), uint64(m.end), m.offset, m.file, m.buildID, hasFunctions)
	}

	// TODO: Anything for tagProfile_DropFrames?
	// TODO: Anything for tagProfile_KeepFrames?

	b.pb.strings(tagProfile_StringTable, b.strings)
	b.zw.Write(b.pb.data)
	b.zw.Close()
}

// appendLocsForStack appends the location IDs for the given stack trace to the given
// location ID slice, locs. The addresses in the stack are return PCs or 1 + the PC of
// an inline marker as the runtime traceback function returns.
//
// It may emit to b.pb, so there must be no message encoding in progress.
func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) {
	b.deck.reset()
	for len(stk) > 0 {
		addr := stk[0]
		if l, ok := b.locs[addr]; ok {
			// first record the location if there is any pending accumulated info.
			if id := b.emitLocation(); id > 0 {
				locs = append(locs, id)
			}

			// then, record the cached location.
			locs = append(locs, l.id)

			// The stk may be truncated due to the stack depth limit
			// (e.g. See maxStack and maxCPUProfStack in runtime) or
			// bugs in runtime. Avoid the crash in either case.
			// TODO(hyangah): The correct fix may require using the exact
			// pcs as the key for b.locs cache management instead of just
			// relying on the very first pc. We are late in the go1.14 dev
			// cycle, so this is a workaround with little code change.
			if len(l.pcs) > len(stk) {
				stk = nil
				// TODO(hyangah): would be nice if we can enable
				// debug print out on demand and report the problematic
				// cached location entry and stack traces. Do we already
				// have such facility to utilize (e.g. GODEBUG)?
			} else {
				stk = stk[len(l.pcs):] // skip the matching pcs.
			}
			continue
		}

		frames, symbolizeResult := allFrames(addr)
		if len(frames) == 0 { // runtime.goexit.
			if id := b.emitLocation(); id > 0 {
				locs = append(locs, id)
			}
			stk = stk[1:]
			continue
		}

		if added := b.deck.tryAdd(addr, frames, symbolizeResult); added {
			stk = stk[1:]
			continue
		}
		// add failed because this addr is not inlined with
		// the existing PCs in the deck. Flush the deck and retry to
		// handle this pc.
		if id := b.emitLocation(); id > 0 {
			locs = append(locs, id)
		}

		// check cache again - previous emitLocation added a new entry
		if l, ok := b.locs[addr]; ok {
			locs = append(locs, l.id)
			stk = stk[len(l.pcs):] // skip the matching pcs.
		} else {
			b.deck.tryAdd(addr, frames, symbolizeResult) // must succeed.
			stk = stk[1:]
		}
	}
	if id := b.emitLocation(); id > 0 { // emit remaining location.
		locs = append(locs, id)
	}
	return locs
}

// pcDeck is a helper to detect a sequence of inlined functions from
// a stack trace returned by the runtime.
//
// The stack traces returned by runtime's traceback functions are fully
// expanded (at least for Go functions) and include the fake pcs representing
// inlined functions. The profile proto expects the inlined functions to be
// encoded in one Location message.
// https://github.com/google/pprof/blob/5e965273ee43930341d897407202dd5e10e952cb/proto/profile.proto#L177-L184
//
// Runtime does not directly expose whether a frame is for an inlined function
// and looking up debug info is not ideal, so we use a heuristic to filter
// the fake pcs and restore the inlined and entry functions. Inlined functions
// have the following properties:
//   Frame's Func is nil (note: also true for non-Go functions), and
//   Frame's Entry matches its entry function frame's Entry. (note: could also be true for recursive calls and non-Go functions),
//   Frame's Name does not match its entry function frame's name.
//
// As reading and processing the pcs in a stack trace one by one (from leaf to the root),
// we use pcDeck to temporarily hold the observed pcs and their expanded frames
// until we observe the entry function frame.
type pcDeck struct {
	pcs             []uintptr
	frames          []runtime.Frame
	symbolizeResult symbolizeFlag
}

// reset clears the deck for reuse, keeping the backing arrays.
func (d *pcDeck) reset() {
	d.pcs = d.pcs[:0]
	d.frames = d.frames[:0]
	d.symbolizeResult = 0
}

// tryAdd tries to add the pc and Frames expanded from it (most likely one,
// since the stack trace is already fully expanded) and the symbolizeResult
// to the deck. If it fails the caller needs to flush the deck and retry.
func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) {
	if existing := len(d.pcs); existing > 0 {
		// 'frames' are all expanded from one 'pc' and represent all inlined functions
		// so we check only the last one.
		// NOTE(review): 'existing' counts d.pcs but indexes d.frames;
		// the two slices can diverge when one pc expanded to multiple
		// frames — confirm this index against upstream.
		newFrame := frames[0]
		last := d.frames[existing-1]
		if last.Func != nil { // the last frame can't be inlined. Flush.
			return false
		}
		if last.Entry == 0 || newFrame.Entry == 0 { // Possibly not a Go function. Don't try to merge.
			return false
		}

		if last.Entry != newFrame.Entry { // newFrame is for a different function.
			return false
		}
		if last.Function == newFrame.Function { // maybe recursion.
			return false
		}
	}
	d.pcs = append(d.pcs, pc)
	d.frames = append(d.frames, frames...)
	d.symbolizeResult |= symbolizeResult
	return true
}

// emitLocation emits the new location and function information recorded in the deck
// and returns the location ID encoded in the profile protobuf.
// It emits to b.pb, so there must be no message encoding in progress.
// It resets the deck.
func (b *profileBuilder) emitLocation() uint64 {
	if len(b.deck.pcs) == 0 {
		return 0
	}
	defer b.deck.reset()

	addr := b.deck.pcs[0]
	firstFrame := b.deck.frames[0]

	// We can't write out functions while in the middle of the
	// Location message, so record new functions we encounter and
	// write them out after the Location.
	type newFunc struct {
		id         uint64
		name, file string
	}
	newFuncs := make([]newFunc, 0, 8)

	// Cache the location under its leading pc; see the TODO in
	// appendLocsForStack about keying on the full pc sequence.
	id := uint64(len(b.locs)) + 1
	b.locs[addr] = locInfo{id: id, pcs: append([]uintptr{}, b.deck.pcs...)}

	start := b.pb.startMessage()
	b.pb.uint64Opt(tagLocation_ID, id)
	b.pb.uint64Opt(tagLocation_Address, uint64(firstFrame.PC))
	for _, frame := range b.deck.frames {
		// Write out each line in frame expansion.
		// Function IDs are 1-based; 0 from the map lookup means unseen.
		funcID := uint64(b.funcs[frame.Function])
		if funcID == 0 {
			funcID = uint64(len(b.funcs)) + 1
			b.funcs[frame.Function] = int(funcID)
			newFuncs = append(newFuncs, newFunc{funcID, frame.Function, frame.File})
		}
		b.pbLine(tagLocation_Line, funcID, int64(frame.Line))
	}
	// Attribute the location to the first mapping containing addr
	// (or the fake mapping), and fold this location's symbolization
	// result into that mapping's flags.
	for i := range b.mem {
		if b.mem[i].start <= addr && addr < b.mem[i].end || b.mem[i].fake {
			b.pb.uint64Opt(tagLocation_MappingID, uint64(i+1))

			m := b.mem[i]
			m.funcs |= b.deck.symbolizeResult
			b.mem[i] = m
			break
		}
	}
	b.pb.endMessage(tagProfile_Location, start)

	// Write out functions we found during frame expansion.
	for _, fn := range newFuncs {
		start := b.pb.startMessage()
		b.pb.uint64Opt(tagFunction_ID, fn.id)
		b.pb.int64Opt(tagFunction_Name, b.stringIndex(fn.name))
		b.pb.int64Opt(tagFunction_SystemName, b.stringIndex(fn.name))
		b.pb.int64Opt(tagFunction_Filename, b.stringIndex(fn.file))
		b.pb.endMessage(tagProfile_Function, start)
	}

	b.flush()
	return id
}

// readMapping reads /proc/self/maps and writes mappings to b.pb.
// It saves the address ranges of the mappings in b.mem for use
// when emitting locations.
func (b *profileBuilder) readMapping() {
	// Best effort: on non-Linux systems the read fails and data is nil,
	// which parseProcSelfMaps treats as empty.
	data, _ := ioutil.ReadFile("/proc/self/maps")
	parseProcSelfMaps(data, b.addMapping)
	if len(b.mem) == 0 { // pprof expects a map entry, so fake one.
		b.addMappingEntry(0, 0, 0, "", "", true)
		// TODO(hyangah): make addMapping return *memMap or
		// take a memMap struct, and get rid of addMappingEntry
		// that takes a bunch of positional arguments.
	}
}

// parseProcSelfMaps parses the Linux /proc/self/maps format in data,
// calling addMapping for every executable mapping found.
func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, buildID string)) {
	// $ cat /proc/self/maps
	// 00400000-0040b000 r-xp 00000000 fc:01 787766                             /bin/cat
	// 0060a000-0060b000 r--p 0000a000 fc:01 787766                             /bin/cat
	// 0060b000-0060c000 rw-p 0000b000 fc:01 787766                             /bin/cat
	// 014ab000-014cc000 rw-p 00000000 00:00 0                                  [heap]
	// 7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064                    /usr/lib/locale/locale-archive
	// 7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226                    /lib/x86_64-linux-gnu/libc-2.19.so
	// 7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0
	// 7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217                    /lib/x86_64-linux-gnu/ld-2.19.so
	// 7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0
	// 7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0
	// 7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217                    /lib/x86_64-linux-gnu/ld-2.19.so
	// 7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217                    /lib/x86_64-linux-gnu/ld-2.19.so
	// 7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0
	// 7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0                          [stack]
	// 7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0                          [vdso]
	// ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]

	var line []byte
	// next removes and returns the next field in the line.
	// It also removes from line any spaces following the field.
	next := func() []byte {
		j := bytes.IndexByte(line, ' ')
		if j < 0 {
			f := line
			line = nil
			return f
		}
		f := line[:j]
		line = line[j+1:]
		for len(line) > 0 && line[0] == ' ' {
			line = line[1:]
		}
		return f
	}

	for len(data) > 0 {
		i := bytes.IndexByte(data, '\n')
		if i < 0 {
			line, data = data, nil
		} else {
			line, data = data[:i], data[i+1:]
		}
		addr := next()
		i = bytes.IndexByte(addr, '-')
		if i < 0 {
			continue
		}
		lo, err := strconv.ParseUint(string(addr[:i]), 16, 64)
		if err != nil {
			continue
		}
		hi, err := strconv.ParseUint(string(addr[i+1:]), 16, 64)
		if err != nil {
			continue
		}
		perm := next()
		if len(perm) < 4 || perm[2] != 'x' {
			// Only interested in executable mappings.
			continue
		}
		offset, err := strconv.ParseUint(string(next()), 16, 64)
		if err != nil {
			continue
		}
		next()          // dev
		inode := next() // inode
		if line == nil {
			continue
		}
		file := string(line)

		// Trim deleted file marker.
		deletedStr := " (deleted)"
		deletedLen := len(deletedStr)
		if len(file) >= deletedLen && file[len(file)-deletedLen:] == deletedStr {
			file = file[:len(file)-deletedLen]
		}

		if len(inode) == 1 && inode[0] == '0' && file == "" {
			// Huge-page text mappings list the initial fragment of
			// mapped but unpopulated memory as being inode 0.
			// Don't report that part.
			// But [vdso] and [vsyscall] are inode 0, so let non-empty file names through.
			continue
		}

		// TODO: pprof's remapMappingIDs makes two adjustments:
		// 1. If there is an /anon_hugepage mapping first and it is
		// consecutive to a next mapping, drop the /anon_hugepage.
		// 2. If start-offset = 0x400000, change start to 0x400000 and offset to 0.
		// There's no indication why either of these is needed.
		// Let's try not doing these and see what breaks.
		// If we do need them, they would go here, before we
		// enter the mappings into b.mem in the first place.

		buildID, _ := elfBuildID(file)
		addMapping(lo, hi, offset, file, buildID)
	}
}

// addMapping records a real (non-fake) executable mapping.
func (b *profileBuilder) addMapping(lo, hi, offset uint64, file, buildID string) {
	b.addMappingEntry(lo, hi, offset, file, buildID, false)
}

// addMappingEntry appends a mapping to b.mem; fake marks the
// placeholder entry used when /proc/self/maps is unavailable.
func (b *profileBuilder) addMappingEntry(lo, hi, offset uint64, file, buildID string, fake bool) {
	b.mem = append(b.mem, memMap{
		start:   uintptr(lo),
		end:     uintptr(hi),
		offset:  offset,
		file:    file,
		buildID: buildID,
		fake:    fake,
	})
}