github.com/cnboonhan/delve@v0.0.0-20230908061759-363f2388c2fb/pkg/proc/variables.go

package proc

import (
	"bytes"
	"debug/dwarf"
	"encoding/binary"
	"errors"
	"fmt"
	"go/constant"
	"go/token"
	"math"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"time"
	"unsafe"

	"github.com/go-delve/delve/pkg/dwarf/godwarf"
	"github.com/go-delve/delve/pkg/dwarf/op"
	"github.com/go-delve/delve/pkg/goversion"
	"github.com/go-delve/delve/pkg/logflags"
)

const (
	maxErrCount = 3 // Max number of read errors to accept while evaluating slices, arrays and structs

	maxArrayStridePrefetch = 1024 // Maximum size of array stride for which we will prefetch the array contents

	// hashTophashEmptyZero is used by map reading code, indicates an empty cell
	hashTophashEmptyZero = 0 // +rtype emptyRest
	// hashTophashEmptyOne is used by map reading code, indicates an empty cell in Go 1.12 and later
	hashTophashEmptyOne = 1 // +rtype emptyOne
	// hashMinTopHashGo111 is used by map reading code, indicates the minimum value of tophash that isn't empty or evacuated, in Go 1.11
	hashMinTopHashGo111 = 4 // +rtype minTopHash
	// hashMinTopHashGo112 is used by map reading code, indicates the minimum value of tophash that isn't empty or evacuated, in Go 1.12
	hashMinTopHashGo112 = 5 // +rtype minTopHash

	maxFramePrefetchSize = 1 * 1024 * 1024 // Maximum prefetch size for a stack frame

	maxMapBucketsFactor = 100 // Maximum number of map buckets to read for every requested map entry when loading variables through (*EvalScope).LocalVariables and (*EvalScope).FunctionArguments.

	maxGoroutineUserCurrentDepth = 30 // Maximum depth used by (*G).UserCurrent to search its location
)

type floatSpecial uint8

const (
	// FloatIsNormal means the value is a normal float.
	FloatIsNormal floatSpecial = iota
	// FloatIsNaN means the float is a special NaN value.
	FloatIsNaN
	// FloatIsPosInf means the float is a special positive infinity value.
	FloatIsPosInf
	// FloatIsNegInf means the float is a special negative infinity value.
	FloatIsNegInf
)

type variableFlags uint16

const (
	// VariableEscaped is set for local variables that escaped to the heap.
	//
	// The compiler performs escape analysis on local variables; variables
	// that may outlive the stack frame are allocated on the heap instead and
	// only the address is recorded on the stack. These variables will be
	// marked with this flag.
	VariableEscaped variableFlags = (1 << iota)
	// VariableShadowed is set for local variables that are shadowed by a
	// variable with the same name in another scope
	VariableShadowed
	// VariableConstant means this variable is a constant value
	VariableConstant
	// VariableArgument means this variable is a function argument
	VariableArgument
	// VariableReturnArgument means this variable is a function return value
	VariableReturnArgument
	// VariableFakeAddress means the address of this variable is either fake
	// (i.e. the variable is partially or completely stored in a CPU register
	// and doesn't have a real address) or possibly no longer available (because
	// the variable is the return value of a function call and allocated on a
	// frame that no longer exists)
	VariableFakeAddress
	// VariableCPtr means the variable is a C pointer
	VariableCPtr
	// VariableCPURegister means this variable is a CPU register.
	VariableCPURegister
)

// Variable represents a variable.
// It contains the address, name,
// type and other information parsed from both the DWARF information
// and the memory of the debugged process.
// If OnlyAddr is true, the variable's value has not been loaded.
type Variable struct {
	Addr      uint64
	OnlyAddr  bool
	Name      string
	DwarfType godwarf.Type
	RealType  godwarf.Type
	Kind      reflect.Kind
	mem       MemoryReadWriter
	bi        *BinaryInfo

	Value        constant.Value
	FloatSpecial floatSpecial
	reg          *op.DwarfRegister // contains the value of this variable if VariableCPURegister flag is set and loaded is false

	Len int64
	Cap int64

	Flags variableFlags

	// Base address of arrays, Base address of the backing array for slices (0 for nil slices)
	// Base address of the backing byte array for strings
	// address of the struct backing chan and map variables
	// address of the function entry point for function variables (0 for nil function pointers)
	Base      uint64
	stride    int64
	fieldType godwarf.Type

	// closureAddr is the closure address for function variables (0 for non-closures)
	closureAddr uint64

	// number of elements to skip when loading a map
	mapSkip int

	// Children lists the variable's sub-variables. What constitutes a child
	// depends on the variable's type. For pointers, there's one child
	// representing the pointed-to variable.
	Children []Variable

	loaded     bool
	Unreadable error

	LocationExpr *locationExpr // location expression
	DeclLine     int64         // line number of this variable's declaration
}

// LoadConfig controls how variables are loaded from the target's memory.
type LoadConfig struct {
	// FollowPointers requests pointers to be automatically dereferenced.
	FollowPointers bool
	// MaxVariableRecurse is how far to recurse when evaluating nested types.
	MaxVariableRecurse int
	// MaxStringLen is the maximum number of bytes read from a string.
	MaxStringLen int
	// MaxArrayValues is the maximum number of elements read from an array, a slice or a map.
	MaxArrayValues int
	// MaxStructFields is the maximum number of fields read from a struct; -1 will read all fields.
	MaxStructFields int

	// MaxMapBuckets is the maximum number of map buckets to read before giving up.
	// A value of 0 will read as many buckets as necessary until the entire map
	// is read or MaxArrayValues is reached.
	//
	// Loading a map is an operation that issues O(num_buckets) operations.
	// Normally the number of buckets is proportional to the number of elements
	// in the map, since the runtime tries to keep the load factor of maps
	// between 40% and 80%.
	//
	// It is possible, however, to create very sparse maps either by:
	// a) adding lots of entries to a map and then deleting most of them, or
	// b) using the make(mapType, N) expression with a very large N
	//
	// When this happens, delve will have to scan many empty buckets to find the
	// few entries in the map.
	// MaxMapBuckets can be set to avoid annoying slowdowns while reading
	// very sparse maps.
	//
	// Since there is no good way for a user of delve to specify the value of
	// MaxMapBuckets, this field is not actually exposed through the API.
	// Instead (*EvalScope).LocalVariables and (*EvalScope).FunctionArguments
	// set this field automatically to MaxArrayValues * maxMapBucketsFactor.
174 // Every other invocation uses the default value of 0, obtaining the old behavior. 175 // In practice this means that debuggers using the ListLocalVars or 176 // ListFunctionArgs API will not experience a massive slowdown when a very 177 // sparse map is in scope, but evaluating a single variable will still work 178 // correctly, even if the variable in question is a very sparse map. 179 MaxMapBuckets int 180 } 181 182 var loadSingleValue = LoadConfig{false, 0, 64, 0, 0, 0} 183 var loadFullValue = LoadConfig{true, 1, 64, 64, -1, 0} 184 var loadFullValueLongerStrings = LoadConfig{true, 1, 1024 * 1024, 64, -1, 0} 185 186 // G status, from: src/runtime/runtime2.go 187 const ( 188 Gidle uint64 = iota // 0 189 Grunnable // 1 runnable and on a run queue 190 Grunning // 2 191 Gsyscall // 3 192 Gwaiting // 4 193 GmoribundUnused // 5 currently unused, but hardcoded in gdb scripts 194 Gdead // 6 195 Genqueue // 7 Only the Gscanenqueue is used. 196 Gcopystack // 8 in this state when newstack is moving the stack 197 ) 198 199 // G represents a runtime G (goroutine) structure (at least the 200 // fields that Delve is interested in). 201 type G struct { 202 ID int64 // Goroutine ID 203 PC uint64 // PC of goroutine when it was parked. 204 SP uint64 // SP of goroutine when it was parked. 205 BP uint64 // BP of goroutine when it was parked (go >= 1.7). 206 LR uint64 // LR of goroutine when it was parked. 207 GoPC uint64 // PC of 'go' statement that created this goroutine. 208 StartPC uint64 // PC of the first function run on this goroutine. 209 Status uint64 210 stack stack // value of stack 211 212 WaitSince int64 213 WaitReason int64 214 215 SystemStack bool // SystemStack is true if this goroutine is currently executing on a system stack. 216 217 // Information on goroutine location 218 CurrentLoc Location 219 220 // Thread that this goroutine is currently allocated to 221 Thread Thread 222 223 variable *Variable 224 225 Unreadable error // could not read the G struct 226 227 labels *map[string]string // G's pprof labels, computed on demand in Labels() method 228 } 229 230 // stack represents a stack span in the target process. 231 type stack struct { 232 hi, lo uint64 233 } 234 235 // GetG returns information on the G (goroutine) that is executing on this thread. 236 // 237 // The G structure for a thread is stored in thread local storage. Here we simply 238 // calculate the address and read and parse the G struct. 239 // 240 // We cannot simply use the allg linked list in order to find the M that represents 241 // the given OS thread and follow its G pointer because on Darwin mach ports are not 242 // universal, so our port for this thread would not map to the `id` attribute of the M 243 // structure. Also, when linked against libc, Go prefers the libc version of clone as 244 // opposed to the runtime version. This has the consequence of not setting M.id for 245 // any thread, regardless of OS. 246 // 247 // In order to get around all this craziness, we read the address of the G structure for 248 // the current thread from the thread local storage area. 249 func GetG(thread Thread) (*G, error) { 250 if thread.Common().g != nil { 251 return thread.Common().g, nil 252 } 253 if loc, _ := thread.Location(); loc != nil && loc.Fn != nil && loc.Fn.Name == "runtime.clone" { 254 // When threads are executing runtime.clone the value of TLS is unreliable. 
		return nil, nil
	}
	gaddr, err := getGVariable(thread)
	if err != nil {
		return nil, err
	}

	g, err := gaddr.parseG()
	if err != nil {
		return nil, err
	}
	if g.ID == 0 {
		// The runtime uses a special goroutine with ID == 0 to mark that the
		// current goroutine is executing on the system stack (sometimes also
		// referred to as the g0 stack or scheduler stack, I'm not sure if there's
		// actually any difference between those).
		// For our purposes it's better if we always return the real goroutine
		// since the rest of the code assumes the goroutine ID is univocal.
		// The real 'current goroutine' is stored in g0.m.curg
		mvar, err := g.variable.structMember("m")
		if err != nil {
			return nil, err
		}
		curgvar, err := mvar.structMember("curg")
		if err != nil {
			return nil, err
		}
		g, err = curgvar.parseG()
		if err != nil {
			if _, ok := err.(ErrNoGoroutine); ok {
				err = ErrNoGoroutine{thread.ThreadID()}
			}
			return nil, err
		}
		g.SystemStack = true
	}
	g.Thread = thread
	if loc, err := thread.Location(); err == nil {
		g.CurrentLoc = *loc
	}
	thread.Common().g = g
	return g, nil
}

// GoroutinesInfo searches for goroutines starting at index 'start', and
// returns an array of up to 'count' (or all found elements, if 'count' is 0)
// G structures representing the information Delve cares about from the internal
// runtime G structure.
// GoroutinesInfo also returns the next index to be used as 'start' argument
// while scanning for all available goroutines, or -1 if there was an error
// or if the index already reached the last possible value.
func GoroutinesInfo(dbp *Target, start, count int) ([]*G, int, error) {
	if _, err := dbp.Valid(); err != nil {
		return nil, -1, err
	}
	if dbp.gcache.allGCache != nil {
		// We can't use the cached array to fulfill a subrange request
		if start == 0 && (count == 0 || count >= len(dbp.gcache.allGCache)) {
			return dbp.gcache.allGCache, -1, nil
		}
	}

	var (
		threadg = map[int64]*G{}
		allg    []*G
	)

	threads := dbp.ThreadList()
	for _, th := range threads {
		g, _ := GetG(th)
		if g != nil {
			threadg[g.ID] = g
		}
	}

	allgptr, allglen, err := dbp.gcache.getRuntimeAllg(dbp.BinInfo(), dbp.Memory())
	if err != nil {
		return nil, -1, err
	}

	for i := uint64(start); i < allglen; i++ {
		if count != 0 && len(allg) >= count {
			return allg, int(i), nil
		}
		gvar, err := newGVariable(dbp.CurrentThread(), allgptr+(i*uint64(dbp.BinInfo().Arch.PtrSize())), true)
		if err != nil {
			allg = append(allg, &G{Unreadable: err})
			continue
		}
		g, err := gvar.parseG()
		if err != nil {
			allg = append(allg, &G{Unreadable: err})
			continue
		}
		if thg, allocated := threadg[g.ID]; allocated {
			loc, err := thg.Thread.Location()
			if err != nil {
				return nil, -1, err
			}
			g.Thread = thg.Thread
			// Prefer actual thread location information.
			g.CurrentLoc = *loc
			g.SystemStack = thg.SystemStack
		}
		if g.Status != Gdead {
			allg = append(allg, g)
		}
		dbp.gcache.addGoroutine(g)
	}
	if start == 0 {
		dbp.gcache.allGCache = allg
	}

	return allg, -1, nil
}

// FindGoroutine returns a G struct representing the goroutine
// specified by `gid`.
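//
// A minimal usage sketch (hypothetical caller code, not part of this file;
// assumes dbp is a valid *Target and 42 is just an illustrative goroutine ID):
//
//	g, err := FindGoroutine(dbp, 42)
//	if err != nil {
//		// the goroutine does not exist or could not be read
//	} else if g != nil {
//		fmt.Println(g.ID, g.CurrentLoc.File, g.CurrentLoc.Line)
//	}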
func FindGoroutine(dbp *Target, gid int64) (*G, error) {
	if selg := dbp.SelectedGoroutine(); (gid == -1) || (selg != nil && selg.ID == gid) || (selg == nil && gid == 0) {
		// Return the currently selected goroutine in the following circumstances:
		//
		// 1. if the caller asks for gid == -1 (because that's what a goroutine ID of -1 means in our API).
		// 2. if gid == selg.ID.
		//    this serves two purposes: (a) it's an optimization that allows us
		//    to avoid reading any other goroutine and, more importantly, (b) we
		//    could be reading an incorrect value for the goroutine ID of a thread.
		//    This condition usually happens when a goroutine calls runtime.clone
		//    and for a short period of time two threads will appear to be running
		//    the same goroutine.
		// 3. if the caller asks for gid == 0 and the selected goroutine is
		//    either 0 or nil.
		//    Goroutine 0 is special, it either means we have no current goroutine
		//    (for example, running C code), or that we are running on a special
		//    stack (system stack, signal handling stack) and we didn't properly
		//    detect it.
		//    Since there could be multiple goroutines '0' running simultaneously,
		//    if the user requests it return the one that's already selected, or
		//    nil if there isn't a selected goroutine.
		return selg, nil
	}

	if gid == 0 {
		return nil, fmt.Errorf("unknown goroutine %d", gid)
	}

	if g := dbp.gcache.partialGCache[gid]; g != nil {
		return g, nil
	}

	// Calling GoroutinesInfo could be slow if there are many goroutines
	// running; check if a running goroutine has been requested first.
	for _, thread := range dbp.ThreadList() {
		g, _ := GetG(thread)
		if g != nil && g.ID == gid {
			return g, nil
		}
	}

	const goroutinesInfoLimit = 10
	nextg := 0
	for nextg >= 0 {
		var gs []*G
		var err error
		gs, nextg, err = GoroutinesInfo(dbp, nextg, goroutinesInfoLimit)
		if err != nil {
			return nil, err
		}
		for i := range gs {
			if gs[i].ID == gid {
				if gs[i].Unreadable != nil {
					return nil, gs[i].Unreadable
				}
				return gs[i], nil
			}
		}
	}

	return nil, fmt.Errorf("unknown goroutine %d", gid)
}

func getGVariable(thread Thread) (*Variable, error) {
	regs, err := thread.Registers()
	if err != nil {
		return nil, err
	}

	gaddr, hasgaddr := regs.GAddr()
	if !hasgaddr {
		bi := thread.BinInfo()
		offset, err := bi.GStructOffset(thread.ProcessMemory())
		if err != nil {
			return nil, err
		}
		gaddr, err = readUintRaw(thread.ProcessMemory(), regs.TLS()+offset, int64(bi.Arch.PtrSize()))
		if err != nil {
			return nil, err
		}
	}

	return newGVariable(thread, gaddr, thread.BinInfo().Arch.DerefTLS())
}

func newGVariable(thread Thread, gaddr uint64, deref bool) (*Variable, error) {
	typ, err := thread.BinInfo().findType("runtime.g")
	if err != nil {
		return nil, err
	}

	if deref {
		typ = &godwarf.PtrType{
			CommonType: godwarf.CommonType{
				ByteSize:    int64(thread.BinInfo().Arch.PtrSize()),
				Name:        "",
				ReflectKind: reflect.Ptr,
				Offset:      0,
			},
			Type: typ,
		}
	}

	return newVariableFromThread(thread, "", gaddr, typ), nil
}

// Defer returns the top-most defer of the goroutine.
func (g *G) Defer() *Defer {
	if g.variable.Unreadable != nil {
		return nil
	}
	dvar, _ := g.variable.structMember("_defer")
	if dvar == nil {
		return nil
	}
	dvar = dvar.maybeDereference()
	if dvar.Addr == 0 {
		return nil
	}
	d := &Defer{variable: dvar}
	d.load()
	return d
}

// UserCurrent returns the location the user's code is at,
// or was at before entering a runtime function.
func (g *G) UserCurrent() Location {
	it, err := goroutineStackIterator(nil, g, 0)
	if err != nil {
		return g.CurrentLoc
	}
	for count := 0; it.Next() && count < maxGoroutineUserCurrentDepth; count++ {
		frame := it.Frame()
		if frame.Call.Fn != nil {
			name := frame.Call.Fn.Name
			if strings.Contains(name, ".") && (!strings.HasPrefix(name, "runtime.") || frame.Call.Fn.exportedRuntime()) && !strings.HasPrefix(name, "internal/") && !strings.HasPrefix(name, "runtime/internal") {
				return frame.Call
			}
		}
	}
	return g.CurrentLoc
}

// Go returns the location of the 'go' statement
// that spawned this goroutine.
func (g *G) Go() Location {
	pc := g.GoPC
	if fn := g.variable.bi.PCToFunc(pc); fn != nil {
		// Backup to CALL instruction.
		// Mimics runtime/traceback.go:677.
		if g.GoPC > fn.Entry {
			pc--
		}
	}
	f, l, fn := g.variable.bi.PCToLine(pc)
	return Location{PC: g.GoPC, File: f, Line: l, Fn: fn}
}

// StartLoc returns the starting location of the goroutine.
func (g *G) StartLoc(tgt *Target) Location {
	fn := g.variable.bi.PCToFunc(g.StartPC)
	fn = tgt.dwrapUnwrap(fn)
	if fn == nil {
		return Location{PC: g.StartPC}
	}
	f, l := tgt.BinInfo().EntryLineForFunc(fn)
	return Location{PC: fn.Entry, File: f, Line: l, Fn: fn}
}

// System returns true if g is a system goroutine. See isSystemGoroutine in
// $GOROOT/src/runtime/traceback.go.
func (g *G) System(tgt *Target) bool {
	loc := g.StartLoc(tgt)
	if loc.Fn == nil {
		return false
	}
	switch loc.Fn.Name {
	case "runtime.main", "runtime.handleAsyncEvent":
		return false
	}
	return strings.HasPrefix(loc.Fn.Name, "runtime.")
}

// Labels returns the goroutine's pprof labels, computed on demand.
func (g *G) Labels() map[string]string {
	if g.labels != nil {
		return *g.labels
	}
	var labels map[string]string
	if labelsVar := g.variable.loadFieldNamed("labels"); labelsVar != nil && len(labelsVar.Children) == 1 {
		if address := labelsVar.Children[0]; address.Addr != 0 {
			labelMapType, _ := g.variable.bi.findType("runtime/pprof.labelMap")
			if labelMapType != nil {
				labelMap := newVariable("", address.Addr, labelMapType, g.variable.bi, g.variable.mem)
				labelMap.loadValue(loadFullValue)
				labels = map[string]string{}
				for i := range labelMap.Children {
					if i%2 == 0 {
						k := labelMap.Children[i]
						v := labelMap.Children[i+1]
						labels[constant.StringVal(k.Value)] = constant.StringVal(v.Value)
					}
				}
			}
		}
	}
	g.labels = &labels
	return *g.labels
}

// Ancestor represents a goroutine ancestor saved by the runtime.
type Ancestor struct {
	ID         int64 // Goroutine ID
	Unreadable error
	pcsVar     *Variable
}

// IsNilErr is returned when a variable is nil.
589 type IsNilErr struct { 590 name string 591 } 592 593 func (err *IsNilErr) Error() string { 594 return fmt.Sprintf("%s is nil", err.name) 595 } 596 597 func globalScope(tgt *Target, bi *BinaryInfo, image *Image, mem MemoryReadWriter) *EvalScope { 598 return &EvalScope{Location: Location{}, Regs: op.DwarfRegisters{StaticBase: image.StaticBase}, Mem: mem, g: nil, BinInfo: bi, target: tgt, frameOffset: 0} 599 } 600 601 func newVariableFromThread(t Thread, name string, addr uint64, dwarfType godwarf.Type) *Variable { 602 return newVariable(name, addr, dwarfType, t.BinInfo(), t.ProcessMemory()) 603 } 604 605 func (v *Variable) newVariable(name string, addr uint64, dwarfType godwarf.Type, mem MemoryReadWriter) *Variable { 606 return newVariable(name, addr, dwarfType, v.bi, mem) 607 } 608 609 func newVariable(name string, addr uint64, dwarfType godwarf.Type, bi *BinaryInfo, mem MemoryReadWriter) *Variable { 610 if styp, isstruct := dwarfType.(*godwarf.StructType); isstruct && !strings.Contains(styp.Name, "<") && !strings.Contains(styp.Name, "{") { 611 // For named structs the compiler will emit a DW_TAG_structure_type entry 612 // and a DW_TAG_typedef entry. 613 // 614 // Normally variables refer to the typedef entry but sometimes global 615 // variables will refer to the struct entry incorrectly. 616 // Also the runtime type offset resolution (runtimeTypeToDIE) will return 617 // the struct entry directly. 618 // 619 // In both cases we prefer to have a typedef type for consistency's sake. 620 // 621 // So we wrap all struct types into a fake typedef type except for: 622 // a. types not defined by go 623 // b. anonymous struct types (they contain the '{' character) 624 // c. Go internal struct types used to describe maps (they contain the '<' 625 // character). 
626 cu := bi.Images[dwarfType.Common().Index].findCompileUnitForOffset(dwarfType.Common().Offset) 627 if cu != nil && cu.isgo { 628 dwarfType = &godwarf.TypedefType{ 629 CommonType: *(dwarfType.Common()), 630 Type: dwarfType, 631 } 632 } 633 } 634 635 v := &Variable{ 636 Name: name, 637 Addr: addr, 638 DwarfType: dwarfType, 639 mem: mem, 640 bi: bi, 641 } 642 643 v.RealType = resolveTypedef(v.DwarfType) 644 645 switch t := v.RealType.(type) { 646 case *godwarf.PtrType: 647 v.Kind = reflect.Ptr 648 if _, isvoid := t.Type.(*godwarf.VoidType); isvoid { 649 v.Kind = reflect.UnsafePointer 650 } else if isCgoType(bi, t) { 651 v.Flags |= VariableCPtr 652 v.fieldType = t.Type 653 v.stride = alignAddr(v.fieldType.Size(), v.fieldType.Align()) 654 v.Len = 0 655 if isCgoCharPtr(bi, t) { 656 v.Kind = reflect.String 657 } 658 if v.Addr != 0 { 659 v.Base, v.Unreadable = readUintRaw(v.mem, v.Addr, int64(v.bi.Arch.PtrSize())) 660 } 661 } 662 case *godwarf.ChanType: 663 v.Kind = reflect.Chan 664 if v.Addr != 0 { 665 v.loadChanInfo() 666 } 667 case *godwarf.MapType: 668 v.Kind = reflect.Map 669 case *godwarf.StringType: 670 v.Kind = reflect.String 671 v.stride = 1 672 v.fieldType = &godwarf.UintType{BasicType: godwarf.BasicType{CommonType: godwarf.CommonType{ByteSize: 1, Name: "byte", ReflectKind: reflect.Uint8}, BitSize: 8, BitOffset: 0}} 673 if v.Addr != 0 { 674 v.Base, v.Len, v.Unreadable = readStringInfo(v.mem, v.bi.Arch, v.Addr, t) 675 } 676 case *godwarf.SliceType: 677 v.Kind = reflect.Slice 678 if v.Addr != 0 { 679 v.loadSliceInfo(t) 680 } 681 case *godwarf.InterfaceType: 682 v.Kind = reflect.Interface 683 case *godwarf.StructType: 684 v.Kind = reflect.Struct 685 case *godwarf.ArrayType: 686 v.Kind = reflect.Array 687 v.Base = v.Addr 688 v.Len = t.Count 689 v.Cap = -1 690 v.fieldType = t.Type 691 v.stride = 0 692 693 if t.Count > 0 { 694 v.stride = t.ByteSize / t.Count 695 } 696 case *godwarf.ComplexType: 697 switch t.ByteSize { 698 case 8: 699 v.Kind = reflect.Complex64 700 case 16: 701 v.Kind = reflect.Complex128 702 } 703 case *godwarf.IntType: 704 v.Kind = reflect.Int 705 case *godwarf.CharType: 706 // Rest of the code assumes that Kind == reflect.Int implies RealType == 707 // godwarf.IntType. 
708 v.RealType = &godwarf.IntType{BasicType: t.BasicType} 709 v.Kind = reflect.Int 710 case *godwarf.UcharType: 711 v.RealType = &godwarf.IntType{BasicType: t.BasicType} 712 v.Kind = reflect.Int 713 case *godwarf.UintType: 714 v.Kind = reflect.Uint 715 case *godwarf.FloatType: 716 switch t.ByteSize { 717 case 4: 718 v.Kind = reflect.Float32 719 case 8: 720 v.Kind = reflect.Float64 721 } 722 case *godwarf.BoolType: 723 v.Kind = reflect.Bool 724 case *godwarf.FuncType: 725 v.Kind = reflect.Func 726 case *godwarf.VoidType: 727 v.Kind = reflect.Invalid 728 case *godwarf.UnspecifiedType: 729 v.Kind = reflect.Invalid 730 default: 731 v.Unreadable = fmt.Errorf("unknown type: %T", t) 732 } 733 734 return v 735 } 736 737 func resolveTypedef(typ godwarf.Type) godwarf.Type { 738 for { 739 switch tt := typ.(type) { 740 case *godwarf.TypedefType: 741 typ = tt.Type 742 case *godwarf.QualType: 743 typ = tt.Type 744 default: 745 return typ 746 } 747 } 748 } 749 750 var constantMaxInt64 = constant.MakeInt64(1<<63 - 1) 751 752 func newConstant(val constant.Value, mem MemoryReadWriter) *Variable { 753 v := &Variable{Value: val, mem: mem, loaded: true} 754 switch val.Kind() { 755 case constant.Int: 756 v.Kind = reflect.Int 757 if constant.Sign(val) >= 0 && constant.Compare(val, token.GTR, constantMaxInt64) { 758 v.Kind = reflect.Uint64 759 } 760 case constant.Float: 761 v.Kind = reflect.Float64 762 case constant.Bool: 763 v.Kind = reflect.Bool 764 case constant.Complex: 765 v.Kind = reflect.Complex128 766 case constant.String: 767 v.Kind = reflect.String 768 v.Len = int64(len(constant.StringVal(val))) 769 } 770 v.Flags |= VariableConstant 771 return v 772 } 773 774 var nilVariable = &Variable{ 775 Name: "nil", 776 Addr: 0, 777 Base: 0, 778 Kind: reflect.Ptr, 779 Children: []Variable{{Addr: 0, OnlyAddr: true}}, 780 } 781 782 func (v *Variable) clone() *Variable { 783 r := *v 784 return &r 785 } 786 787 // TypeString returns the string representation 788 // of the type of this variable. 789 func (v *Variable) TypeString() string { 790 if v == nilVariable { 791 return "nil" 792 } 793 if v.DwarfType == nil { 794 return v.Kind.String() 795 } 796 if v.DwarfType.Common().Name != "" { 797 return v.DwarfType.Common().Name 798 } 799 r := v.DwarfType.String() 800 if r == "*void" { 801 cu := v.bi.Images[v.DwarfType.Common().Index].findCompileUnitForOffset(v.DwarfType.Common().Offset) 802 if cu != nil && cu.isgo { 803 r = "unsafe.Pointer" 804 } 805 } 806 return r 807 } 808 809 func (v *Variable) toField(field *godwarf.StructField) (*Variable, error) { 810 if v.Unreadable != nil { 811 return v.clone(), nil 812 } 813 if v.Addr == 0 { 814 return nil, &IsNilErr{v.Name} 815 } 816 817 name := "" 818 if v.Name != "" { 819 parts := strings.Split(field.Name, ".") 820 if len(parts) > 1 { 821 name = fmt.Sprintf("%s.%s", v.Name, parts[1]) 822 } else { 823 name = fmt.Sprintf("%s.%s", v.Name, field.Name) 824 } 825 } 826 return v.newVariable(name, uint64(int64(v.Addr)+field.ByteOffset), field.Type, v.mem), nil 827 } 828 829 // ErrNoGoroutine returned when a G could not be found 830 // for a specific thread. 
831 type ErrNoGoroutine struct { 832 tid int 833 } 834 835 func (ng ErrNoGoroutine) Error() string { 836 return fmt.Sprintf("no G executing on thread %d", ng.tid) 837 } 838 839 var ErrUnreadableG = errors.New("could not read G struct") 840 841 func (v *Variable) parseG() (*G, error) { 842 mem := v.mem 843 gaddr := v.Addr 844 _, deref := v.RealType.(*godwarf.PtrType) 845 846 if deref { 847 var err error 848 gaddr, err = readUintRaw(mem, gaddr, int64(v.bi.Arch.PtrSize())) 849 if err != nil { 850 return nil, fmt.Errorf("error derefing *G %s", err) 851 } 852 } 853 if gaddr == 0 { 854 id := 0 855 if thread, ok := mem.(Thread); ok { 856 id = thread.ThreadID() 857 } 858 return nil, ErrNoGoroutine{tid: id} 859 } 860 isptr := func(t godwarf.Type) bool { 861 _, ok := t.(*godwarf.PtrType) 862 return ok 863 } 864 for isptr(v.RealType) { 865 v = v.maybeDereference() // +rtype g 866 } 867 868 v.mem = cacheMemory(v.mem, v.Addr, int(v.RealType.Size())) 869 870 schedVar := v.loadFieldNamed("sched") // +rtype gobuf 871 if schedVar == nil { 872 return nil, ErrUnreadableG 873 } 874 pc, _ := constant.Int64Val(schedVar.fieldVariable("pc").Value) // +rtype uintptr 875 sp, _ := constant.Int64Val(schedVar.fieldVariable("sp").Value) // +rtype uintptr 876 var bp, lr int64 877 if bpvar := schedVar.fieldVariable("bp"); /* +rtype -opt uintptr */ bpvar != nil && bpvar.Value != nil { 878 bp, _ = constant.Int64Val(bpvar.Value) 879 } 880 if lrvar := schedVar.fieldVariable("lr"); /* +rtype -opt uintptr */ lrvar != nil && lrvar.Value != nil { 881 lr, _ = constant.Int64Val(lrvar.Value) 882 } 883 884 unreadable := false 885 886 loadInt64Maybe := func(name string) int64 { 887 vv := v.loadFieldNamed(name) 888 if vv == nil { 889 unreadable = true 890 return 0 891 } 892 n, _ := constant.Int64Val(vv.Value) 893 return n 894 } 895 896 loadUint64Maybe := func(name string) uint64 { 897 vv := v.loadFieldNamed(name) 898 if vv == nil { 899 unreadable = true 900 return 0 901 } 902 n, _ := constant.Uint64Val(vv.Value) 903 return n 904 } 905 906 id := loadUint64Maybe("goid") // +rtype int64|uint64 907 gopc := loadInt64Maybe("gopc") // +rtype uintptr 908 startpc := loadInt64Maybe("startpc") // +rtype uintptr 909 waitSince := loadInt64Maybe("waitsince") // +rtype int64 910 waitReason := int64(0) 911 if producer := v.bi.Producer(); producer != "" && goversion.ProducerAfterOrEqual(producer, 1, 11) { 912 waitReason = loadInt64Maybe("waitreason") // +rtype -opt waitReason 913 } 914 var stackhi, stacklo uint64 915 if stackVar := v.loadFieldNamed("stack"); /* +rtype stack */ stackVar != nil { 916 if stackhiVar := stackVar.fieldVariable("hi"); /* +rtype uintptr */ stackhiVar != nil && stackhiVar.Value != nil { 917 stackhi, _ = constant.Uint64Val(stackhiVar.Value) 918 } else { 919 unreadable = true 920 } 921 if stackloVar := stackVar.fieldVariable("lo"); /* +rtype uintptr */ stackloVar != nil && stackloVar.Value != nil { 922 stacklo, _ = constant.Uint64Val(stackloVar.Value) 923 } else { 924 unreadable = true 925 } 926 } 927 928 status := uint64(0) 929 if atomicStatus := v.loadFieldNamed("atomicstatus"); /* +rtype uint32|runtime/internal/atomic.Uint32 */ atomicStatus != nil { 930 if constant.Val(atomicStatus.Value) != nil { 931 status, _ = constant.Uint64Val(atomicStatus.Value) 932 } else { 933 atomicStatus := atomicStatus // +rtype runtime/internal/atomic.Uint32 934 vv := atomicStatus.fieldVariable("value") // +rtype uint32 935 if vv == nil { 936 unreadable = true 937 } else { 938 status, _ = constant.Uint64Val(vv.Value) 939 } 940 } 941 } else { 942 
unreadable = true 943 } 944 945 if unreadable { 946 return nil, ErrUnreadableG 947 } 948 949 f, l, fn := v.bi.PCToLine(uint64(pc)) 950 951 v.Name = "runtime.curg" 952 953 g := &G{ 954 ID: int64(id), 955 GoPC: uint64(gopc), 956 StartPC: uint64(startpc), 957 PC: uint64(pc), 958 SP: uint64(sp), 959 BP: uint64(bp), 960 LR: uint64(lr), 961 Status: uint64(status), 962 WaitSince: waitSince, 963 WaitReason: waitReason, 964 CurrentLoc: Location{PC: uint64(pc), File: f, Line: l, Fn: fn}, 965 variable: v, 966 stack: stack{hi: stackhi, lo: stacklo}, 967 } 968 return g, nil 969 } 970 971 func (v *Variable) loadFieldNamed(name string) *Variable { 972 v, err := v.structMember(name) 973 if err != nil { 974 return nil 975 } 976 v.loadValue(loadFullValue) 977 if v.Unreadable != nil { 978 return nil 979 } 980 return v 981 } 982 983 func (v *Variable) fieldVariable(name string) *Variable { 984 if !v.loaded { 985 panic("fieldVariable called on a variable that wasn't loaded") 986 } 987 for i := range v.Children { 988 if child := &v.Children[i]; child.Name == name { 989 return child 990 } 991 } 992 return nil 993 } 994 995 var errTracebackAncestorsDisabled = errors.New("tracebackancestors is disabled") 996 997 // Ancestors returns the list of ancestors for g. 998 func Ancestors(p *Target, g *G, n int) ([]Ancestor, error) { 999 scope := globalScope(p, p.BinInfo(), p.BinInfo().Images[0], p.Memory()) 1000 tbav, err := scope.EvalExpression("runtime.debug.tracebackancestors", loadSingleValue) 1001 if err == nil && tbav.Unreadable == nil && tbav.Kind == reflect.Int { 1002 tba, _ := constant.Int64Val(tbav.Value) 1003 if tba == 0 { 1004 return nil, errTracebackAncestorsDisabled 1005 } 1006 } 1007 1008 av, err := g.variable.structMember("ancestors") 1009 if err != nil { 1010 return nil, err 1011 } 1012 av = av.maybeDereference() 1013 av.loadValue(LoadConfig{MaxArrayValues: n, MaxVariableRecurse: 1, MaxStructFields: -1}) 1014 if av.Unreadable != nil { 1015 return nil, err 1016 } 1017 if av.Addr == 0 { 1018 // no ancestors 1019 return nil, nil 1020 } 1021 1022 r := make([]Ancestor, len(av.Children)) 1023 1024 for i := range av.Children { 1025 if av.Children[i].Unreadable != nil { 1026 r[i].Unreadable = av.Children[i].Unreadable 1027 continue 1028 } 1029 goidv := av.Children[i].fieldVariable("goid") 1030 if goidv.Unreadable != nil { 1031 r[i].Unreadable = goidv.Unreadable 1032 continue 1033 } 1034 r[i].ID, _ = constant.Int64Val(goidv.Value) 1035 pcsVar := av.Children[i].fieldVariable("pcs") 1036 if pcsVar.Unreadable != nil { 1037 r[i].Unreadable = pcsVar.Unreadable 1038 } 1039 pcsVar.loaded = false 1040 pcsVar.Children = pcsVar.Children[:0] 1041 r[i].pcsVar = pcsVar 1042 } 1043 1044 return r, nil 1045 } 1046 1047 // Stack returns the stack trace of ancestor 'a' as saved by the runtime. 
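//
// A usage sketch (hypothetical caller code; assumes p is a *Target, g a *G,
// and that the target runs with GODEBUG=tracebackancestors=N):
//
//	ancestors, err := Ancestors(p, g, 10)
//	if err != nil {
//		return // e.g. errTracebackAncestorsDisabled
//	}
//	for _, a := range ancestors {
//		frames, err := a.Stack(32)
//		if err != nil {
//			continue
//		}
//		_ = frames // one Stackframe per program counter saved by the runtime
//	}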
1048 func (a *Ancestor) Stack(n int) ([]Stackframe, error) { 1049 if a.Unreadable != nil { 1050 return nil, a.Unreadable 1051 } 1052 pcsVar := a.pcsVar.clone() 1053 pcsVar.loadValue(LoadConfig{MaxArrayValues: n}) 1054 if pcsVar.Unreadable != nil { 1055 return nil, pcsVar.Unreadable 1056 } 1057 r := make([]Stackframe, len(pcsVar.Children)) 1058 for i := range pcsVar.Children { 1059 if pcsVar.Children[i].Unreadable != nil { 1060 r[i] = Stackframe{Err: pcsVar.Children[i].Unreadable} 1061 continue 1062 } 1063 if pcsVar.Children[i].Kind != reflect.Uint { 1064 return nil, fmt.Errorf("wrong type for pcs item %d: %v", i, pcsVar.Children[i].Kind) 1065 } 1066 pc, _ := constant.Int64Val(pcsVar.Children[i].Value) 1067 fn := a.pcsVar.bi.PCToFunc(uint64(pc)) 1068 if fn == nil { 1069 loc := Location{PC: uint64(pc)} 1070 r[i] = Stackframe{Current: loc, Call: loc} 1071 continue 1072 } 1073 pc2 := uint64(pc) 1074 if pc2-1 >= fn.Entry { 1075 pc2-- 1076 } 1077 f, ln := fn.cu.lineInfo.PCToLine(fn.Entry, pc2) 1078 loc := Location{PC: uint64(pc), File: f, Line: ln, Fn: fn} 1079 r[i] = Stackframe{Current: loc, Call: loc} 1080 } 1081 r[len(r)-1].Bottom = pcsVar.Len == int64(len(pcsVar.Children)) 1082 return r, nil 1083 } 1084 1085 func (v *Variable) structMember(memberName string) (*Variable, error) { 1086 if v.Unreadable != nil { 1087 return v.clone(), nil 1088 } 1089 vname := v.Name 1090 if v.loaded && (v.Flags&VariableFakeAddress) != 0 { 1091 for i := range v.Children { 1092 if v.Children[i].Name == memberName { 1093 return &v.Children[i], nil 1094 } 1095 } 1096 return nil, fmt.Errorf("%s has no member %s", vname, memberName) 1097 } 1098 switch v.Kind { 1099 case reflect.Chan: 1100 v = v.clone() 1101 v.RealType = resolveTypedef(&(v.RealType.(*godwarf.ChanType).TypedefType)) 1102 case reflect.Interface: 1103 v.loadInterface(0, false, LoadConfig{}) 1104 if len(v.Children) > 0 { 1105 v = &v.Children[0] 1106 } 1107 } 1108 1109 queue := []*Variable{v} 1110 seen := map[string]struct{}{} // prevent infinite loops 1111 first := true 1112 1113 for len(queue) > 0 { 1114 v := queue[0] 1115 queue = append(queue[:0], queue[1:]...) 
1116 if _, isseen := seen[v.RealType.String()]; isseen { 1117 continue 1118 } 1119 seen[v.RealType.String()] = struct{}{} 1120 1121 structVar := v.maybeDereference() 1122 structVar.Name = v.Name 1123 if structVar.Unreadable != nil { 1124 return structVar, nil 1125 } 1126 1127 switch t := structVar.RealType.(type) { 1128 case *godwarf.StructType: 1129 for _, field := range t.Field { 1130 if field.Name == memberName { 1131 return structVar.toField(field) 1132 } 1133 isEmbeddedStructMember := 1134 field.Embedded || 1135 (field.Type.Common().Name == field.Name) || 1136 (len(field.Name) > 1 && 1137 field.Name[0] == '*' && 1138 field.Type.Common().Name[1:] == field.Name[1:]) 1139 if !isEmbeddedStructMember { 1140 continue 1141 } 1142 embeddedVar, err := structVar.toField(field) 1143 if err != nil { 1144 return nil, err 1145 } 1146 // Check for embedded field referenced by type name 1147 parts := strings.Split(field.Name, ".") 1148 if len(parts) > 1 && parts[1] == memberName { 1149 return embeddedVar, nil 1150 } 1151 embeddedVar.Name = structVar.Name 1152 queue = append(queue, embeddedVar) 1153 } 1154 default: 1155 if first { 1156 return nil, fmt.Errorf("%s (type %s) is not a struct", vname, structVar.TypeString()) 1157 } 1158 } 1159 first = false 1160 } 1161 1162 return nil, fmt.Errorf("%s has no member %s", vname, memberName) 1163 } 1164 1165 func readVarEntry(entry *godwarf.Tree, image *Image) (name string, typ godwarf.Type, err error) { 1166 name, ok := entry.Val(dwarf.AttrName).(string) 1167 if !ok { 1168 return "", nil, fmt.Errorf("malformed variable DIE (name)") 1169 } 1170 1171 typ, err = entry.Type(image.dwarf, image.index, image.typeCache) 1172 if err != nil { 1173 return "", nil, err 1174 } 1175 1176 return name, typ, nil 1177 } 1178 1179 // Extracts the name and type of a variable from a dwarf entry 1180 // then executes the instructions given in the DW_AT_location attribute to grab the variable's address 1181 func extractVarInfoFromEntry(tgt *Target, bi *BinaryInfo, image *Image, regs op.DwarfRegisters, mem MemoryReadWriter, entry *godwarf.Tree, dictAddr uint64) (*Variable, error) { 1182 if entry.Tag != dwarf.TagFormalParameter && entry.Tag != dwarf.TagVariable { 1183 return nil, fmt.Errorf("invalid entry tag, only supports FormalParameter and Variable, got %s", entry.Tag.String()) 1184 } 1185 1186 n, t, err := readVarEntry(entry, image) 1187 if err != nil { 1188 return nil, err 1189 } 1190 1191 t, err = resolveParametricType(bi, mem, t, dictAddr) 1192 if err != nil { 1193 // Log the error, keep going with t, which will be the shape type 1194 logflags.DebuggerLogger().Errorf("could not resolve parametric type of %s", n) 1195 } 1196 1197 addr, pieces, descr, err := bi.Location(entry, dwarf.AttrLocation, regs.PC(), regs, mem) 1198 if pieces != nil { 1199 var cmem *compositeMemory 1200 if tgt != nil { 1201 addr, cmem, err = tgt.newCompositeMemory(mem, regs, pieces, descr, t.Common().ByteSize) 1202 } else { 1203 cmem, err = newCompositeMemory(mem, bi.Arch, regs, pieces, t.Common().ByteSize) 1204 if cmem != nil { 1205 cmem.base = fakeAddressUnresolv 1206 addr = int64(cmem.base) 1207 } 1208 } 1209 if cmem != nil { 1210 mem = cmem 1211 } 1212 } 1213 1214 v := newVariable(n, uint64(addr), t, bi, mem) 1215 if pieces != nil { 1216 v.Flags |= VariableFakeAddress 1217 } 1218 v.LocationExpr = descr 1219 v.DeclLine, _ = entry.Val(dwarf.AttrDeclLine).(int64) 1220 if err != nil { 1221 v.Unreadable = err 1222 } 1223 return v, nil 1224 } 1225 1226 // If v is a pointer a new variable is returned 
containing the value pointed by v. 1227 func (v *Variable) maybeDereference() *Variable { 1228 if v.Unreadable != nil { 1229 return v 1230 } 1231 1232 switch t := v.RealType.(type) { 1233 case *godwarf.PtrType: 1234 if v.Addr == 0 && len(v.Children) == 1 && v.loaded { 1235 // fake pointer variable constructed by casting an integer to a pointer type 1236 return &v.Children[0] 1237 } 1238 ptrval, err := readUintRaw(v.mem, v.Addr, t.ByteSize) 1239 r := v.newVariable("", ptrval, t.Type, DereferenceMemory(v.mem)) 1240 if err != nil { 1241 r.Unreadable = err 1242 } 1243 1244 return r 1245 default: 1246 return v 1247 } 1248 } 1249 1250 // loadPtr assumes that v is a pointer and loads its value. v also gets a child 1251 // variable, representing the pointed-to value. If v is already loaded, 1252 // loadPtr() is a no-op. 1253 func (v *Variable) loadPtr() { 1254 if len(v.Children) > 0 { 1255 // We've already loaded this variable. 1256 return 1257 } 1258 1259 t := v.RealType.(*godwarf.PtrType) 1260 v.Len = 1 1261 1262 var child *Variable 1263 if v.Unreadable == nil { 1264 ptrval, err := readUintRaw(v.mem, v.Addr, t.ByteSize) 1265 if err == nil { 1266 child = v.newVariable("", ptrval, t.Type, DereferenceMemory(v.mem)) 1267 } else { 1268 // We failed to read the pointer value; mark v as unreadable. 1269 v.Unreadable = err 1270 } 1271 } 1272 1273 if v.Unreadable != nil { 1274 // Pointers get a child even if their value can't be read, to 1275 // maintain backwards compatibility. 1276 child = v.newVariable("", 0 /* addr */, t.Type, DereferenceMemory(v.mem)) 1277 child.Unreadable = fmt.Errorf("parent pointer unreadable: %w", v.Unreadable) 1278 } 1279 1280 v.Children = []Variable{*child} 1281 v.Value = constant.MakeUint64(v.Children[0].Addr) 1282 } 1283 1284 func loadValues(vars []*Variable, cfg LoadConfig) { 1285 for i := range vars { 1286 vars[i].loadValueInternal(0, cfg) 1287 } 1288 } 1289 1290 // Extracts the value of the variable at the given address. 
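// How much gets loaded is controlled by the LoadConfig; a minimal sketch of a
// custom configuration (the values below are illustrative, not defaults):
//
//	cfg := LoadConfig{
//		FollowPointers:     true,
//		MaxVariableRecurse: 1,
//		MaxStringLen:       64,
//		MaxArrayValues:     64,
//		MaxStructFields:    -1,
//	}
//	v.loadValue(cfg)
//	// After loading, v.Value, v.Children and v.Unreadable describe the result.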
1291 func (v *Variable) loadValue(cfg LoadConfig) { 1292 v.loadValueInternal(0, cfg) 1293 } 1294 1295 func (v *Variable) loadValueInternal(recurseLevel int, cfg LoadConfig) { 1296 if v.Unreadable != nil || v.loaded || (v.Addr == 0 && v.Base == 0) { 1297 return 1298 } 1299 1300 v.loaded = true 1301 switch v.Kind { 1302 case reflect.Ptr, reflect.UnsafePointer: 1303 v.loadPtr() 1304 if cfg.FollowPointers { 1305 // Don't increase the recursion level when dereferencing pointers 1306 // unless this is a pointer to interface (which could cause an infinite loop) 1307 nextLvl := recurseLevel 1308 checkLvl := false 1309 if v.Children[0].Kind == reflect.Interface { 1310 nextLvl++ 1311 } else if ptyp, isptr := v.RealType.(*godwarf.PtrType); isptr { 1312 _, elemTypIsPtr := resolveTypedef(ptyp.Type).(*godwarf.PtrType) 1313 if elemTypIsPtr { 1314 nextLvl++ 1315 checkLvl = true 1316 } 1317 } 1318 if checkLvl && recurseLevel > cfg.MaxVariableRecurse { 1319 v.Children[0].OnlyAddr = true 1320 } else { 1321 v.Children[0].loadValueInternal(nextLvl, cfg) 1322 } 1323 } else { 1324 v.Children[0].OnlyAddr = true 1325 } 1326 1327 case reflect.Chan: 1328 sv := v.clone() 1329 sv.RealType = resolveTypedef(&(sv.RealType.(*godwarf.ChanType).TypedefType)) 1330 sv = sv.maybeDereference() 1331 sv.loadValueInternal(0, loadFullValue) 1332 v.Children = sv.Children 1333 v.Len = sv.Len 1334 v.Base = sv.Addr 1335 1336 case reflect.Map: 1337 if recurseLevel <= cfg.MaxVariableRecurse { 1338 v.loadMap(recurseLevel, cfg) 1339 } else { 1340 // loads length so that the client knows that the map isn't empty 1341 v.mapIterator() 1342 } 1343 1344 case reflect.String: 1345 var val string 1346 switch { 1347 case v.Flags&VariableCPtr != 0: 1348 var done bool 1349 val, done, v.Unreadable = readCStringValue(DereferenceMemory(v.mem), v.Base, cfg) 1350 if v.Unreadable == nil { 1351 v.Len = int64(len(val)) 1352 if !done { 1353 v.Len++ 1354 } 1355 } 1356 1357 case v.Flags&VariableCPURegister != 0: 1358 val = fmt.Sprintf("%x", v.reg.Bytes) 1359 s := v.Base - fakeAddressUnresolv 1360 if s < uint64(len(val)) { 1361 val = val[s:] 1362 if v.Len >= 0 && v.Len < int64(len(val)) { 1363 val = val[:v.Len] 1364 } 1365 } 1366 1367 default: 1368 val, v.Unreadable = readStringValue(DereferenceMemory(v.mem), v.Base, v.Len, cfg) 1369 } 1370 v.Value = constant.MakeString(val) 1371 1372 case reflect.Slice, reflect.Array: 1373 v.loadArrayValues(recurseLevel, cfg) 1374 1375 case reflect.Struct: 1376 v.mem = cacheMemory(v.mem, v.Addr, int(v.RealType.Size())) 1377 t := v.RealType.(*godwarf.StructType) 1378 v.Len = int64(len(t.Field)) 1379 // Recursively call extractValue to grab 1380 // the value of all the members of the struct. 
1381 if recurseLevel <= cfg.MaxVariableRecurse { 1382 v.Children = make([]Variable, 0, len(t.Field)) 1383 for i, field := range t.Field { 1384 if cfg.MaxStructFields >= 0 && len(v.Children) >= cfg.MaxStructFields { 1385 break 1386 } 1387 f, _ := v.toField(field) 1388 v.Children = append(v.Children, *f) 1389 v.Children[i].Name = field.Name 1390 v.Children[i].loadValueInternal(recurseLevel+1, cfg) 1391 } 1392 } 1393 if t.Name == "time.Time" { 1394 v.formatTime() 1395 } 1396 1397 case reflect.Interface: 1398 v.loadInterface(recurseLevel, true, cfg) 1399 1400 case reflect.Complex64, reflect.Complex128: 1401 v.readComplex(v.RealType.(*godwarf.ComplexType).ByteSize) 1402 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 1403 var val int64 1404 val, v.Unreadable = readIntRaw(v.mem, v.Addr, v.RealType.(*godwarf.IntType).ByteSize) 1405 v.Value = constant.MakeInt64(val) 1406 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 1407 if v.Flags&VariableCPURegister != 0 { 1408 v.Value = constant.MakeUint64(v.reg.Uint64Val) 1409 } else { 1410 var val uint64 1411 val, v.Unreadable = readUintRaw(v.mem, v.Addr, v.RealType.(*godwarf.UintType).ByteSize) 1412 v.Value = constant.MakeUint64(val) 1413 } 1414 case reflect.Bool: 1415 val := make([]byte, 1) 1416 _, err := v.mem.ReadMemory(val, v.Addr) 1417 v.Unreadable = err 1418 if err == nil { 1419 v.Value = constant.MakeBool(val[0] != 0) 1420 } 1421 case reflect.Float32, reflect.Float64: 1422 var val float64 1423 val, v.Unreadable = v.readFloatRaw(v.RealType.(*godwarf.FloatType).ByteSize) 1424 v.Value = constant.MakeFloat64(val) 1425 switch { 1426 case math.IsInf(val, +1): 1427 v.FloatSpecial = FloatIsPosInf 1428 case math.IsInf(val, -1): 1429 v.FloatSpecial = FloatIsNegInf 1430 case math.IsNaN(val): 1431 v.FloatSpecial = FloatIsNaN 1432 } 1433 case reflect.Func: 1434 v.readFunctionPtr() 1435 default: 1436 v.Unreadable = fmt.Errorf("unknown or unsupported kind: %q", v.Kind.String()) 1437 } 1438 } 1439 1440 // convertToEface converts srcv into an "interface {}" and writes it to 1441 // dstv. 1442 // Dstv must be a variable of type "interface {}" and srcv must either be an 1443 // interface or a pointer shaped variable (map, channel, pointer or struct 1444 // containing a single pointer) 1445 func convertToEface(srcv, dstv *Variable) error { 1446 if dstv.RealType.String() != "interface {}" { 1447 return &typeConvErr{srcv.DwarfType, dstv.RealType} 1448 } 1449 if _, isiface := srcv.RealType.(*godwarf.InterfaceType); isiface { 1450 // iface -> eface conversion 1451 _type, data, _ := srcv.readInterface() 1452 if srcv.Unreadable != nil { 1453 return srcv.Unreadable 1454 } 1455 _type = _type.maybeDereference() 1456 dstv.writeEmptyInterface(uint64(_type.Addr), data) 1457 return nil 1458 } 1459 typeAddr, typeKind, runtimeTypeFound, err := dwarfToRuntimeType(srcv.bi, srcv.mem, srcv.RealType) 1460 if err != nil { 1461 return err 1462 } 1463 if !runtimeTypeFound || typeKind&kindDirectIface == 0 { 1464 return &typeConvErr{srcv.DwarfType, dstv.RealType} 1465 } 1466 return dstv.writeEmptyInterface(typeAddr, srcv) 1467 } 1468 1469 func readStringInfo(mem MemoryReadWriter, arch *Arch, addr uint64, typ *godwarf.StringType) (uint64, int64, error) { 1470 // string data structure is always two ptrs in size. 
Addr, followed by len 1471 // http://research.swtch.com/godata 1472 1473 mem = cacheMemory(mem, addr, arch.PtrSize()*2) 1474 1475 var strlen int64 1476 var outaddr uint64 1477 var err error 1478 1479 for _, field := range typ.StructType.Field { 1480 switch field.Name { 1481 case "len": 1482 strlen, err = readIntRaw(mem, addr+uint64(field.ByteOffset), int64(arch.PtrSize())) 1483 if err != nil { 1484 return 0, 0, fmt.Errorf("could not read string len %s", err) 1485 } 1486 if strlen < 0 { 1487 return 0, 0, fmt.Errorf("invalid length: %d", strlen) 1488 } 1489 case "str": 1490 outaddr, err = readUintRaw(mem, addr+uint64(field.ByteOffset), int64(arch.PtrSize())) 1491 if err != nil { 1492 return 0, 0, fmt.Errorf("could not read string pointer %s", err) 1493 } 1494 if addr == 0 { 1495 return 0, 0, nil 1496 } 1497 } 1498 } 1499 1500 return outaddr, strlen, nil 1501 } 1502 1503 func readStringValue(mem MemoryReadWriter, addr uint64, strlen int64, cfg LoadConfig) (string, error) { 1504 if strlen == 0 { 1505 return "", nil 1506 } 1507 1508 count := strlen 1509 if count > int64(cfg.MaxStringLen) { 1510 count = int64(cfg.MaxStringLen) 1511 } 1512 1513 val := make([]byte, int(count)) 1514 _, err := mem.ReadMemory(val, addr) 1515 if err != nil { 1516 return "", fmt.Errorf("could not read string at %#v due to %s", addr, err) 1517 } 1518 1519 return string(val), nil 1520 } 1521 1522 func readCStringValue(mem MemoryReadWriter, addr uint64, cfg LoadConfig) (string, bool, error) { 1523 buf := make([]byte, cfg.MaxStringLen) // 1524 val := buf[:0] // part of the string we've already read 1525 1526 for len(buf) > 0 { 1527 // Reads some memory for the string but (a) never more than we would 1528 // need (considering cfg.MaxStringLen), and (b) never cross a page boundary 1529 // until we're sure we have to. 1530 // The page check is needed to avoid getting an I/O error for reading 1531 // memory we don't even need. 1532 // We don't know how big a page is but 1024 is a reasonable minimum common 1533 // divisor for all architectures. 
1534 curaddr := addr + uint64(len(val)) 1535 maxsize := int(alignAddr(int64(curaddr+1), 1024) - int64(curaddr)) 1536 size := len(buf) 1537 if size > maxsize { 1538 size = maxsize 1539 } 1540 1541 _, err := mem.ReadMemory(buf[:size], curaddr) 1542 if err != nil { 1543 return "", false, fmt.Errorf("could not read string at %#v due to %s", addr, err) 1544 } 1545 1546 done := false 1547 for i := 0; i < size; i++ { 1548 if buf[i] == 0 { 1549 done = true 1550 size = i 1551 break 1552 } 1553 } 1554 1555 val = val[:len(val)+size] 1556 buf = buf[size:] 1557 if done { 1558 return string(val), true, nil 1559 } 1560 } 1561 1562 return string(val), false, nil 1563 } 1564 1565 const ( 1566 sliceArrayFieldName = "array" 1567 sliceLenFieldName = "len" 1568 sliceCapFieldName = "cap" 1569 ) 1570 1571 func (v *Variable) loadSliceInfo(t *godwarf.SliceType) { 1572 v.mem = cacheMemory(v.mem, v.Addr, int(t.Size())) 1573 1574 var err error 1575 for _, f := range t.Field { 1576 switch f.Name { 1577 case sliceArrayFieldName: 1578 var base uint64 1579 base, err = readUintRaw(v.mem, uint64(int64(v.Addr)+f.ByteOffset), f.Type.Size()) 1580 if err == nil { 1581 v.Base = base 1582 // Dereference array type to get value type 1583 ptrType, ok := f.Type.(*godwarf.PtrType) 1584 if !ok { 1585 //lint:ignore ST1005 backwards compatibility 1586 v.Unreadable = fmt.Errorf("Invalid type %s in slice array", f.Type) 1587 return 1588 } 1589 v.fieldType = ptrType.Type 1590 } 1591 case sliceLenFieldName: 1592 lstrAddr, _ := v.toField(f) 1593 lstrAddr.loadValue(loadSingleValue) 1594 err = lstrAddr.Unreadable 1595 if err == nil { 1596 v.Len, _ = constant.Int64Val(lstrAddr.Value) 1597 } 1598 case sliceCapFieldName: 1599 cstrAddr, _ := v.toField(f) 1600 cstrAddr.loadValue(loadSingleValue) 1601 err = cstrAddr.Unreadable 1602 if err == nil { 1603 v.Cap, _ = constant.Int64Val(cstrAddr.Value) 1604 } 1605 } 1606 if err != nil { 1607 v.Unreadable = err 1608 return 1609 } 1610 } 1611 1612 v.stride = v.fieldType.Size() 1613 if t, ok := v.fieldType.(*godwarf.PtrType); ok { 1614 v.stride = t.ByteSize 1615 } 1616 } 1617 1618 // loadChanInfo loads the buffer size of the channel and changes the type of 1619 // the buf field from unsafe.Pointer to an array of the correct type. 
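//
// For illustration (assumed example, not code from this file): for a buffered
// `chan int` with a buffer length of 4 the rewritten struct describes the
// buffer roughly as
//
//	buf *[4]int
//
// instead of the original unsafe.Pointer, so loading the channel variable also
// loads its buffered elements.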
1620 func (v *Variable) loadChanInfo() { 1621 chanType, ok := v.RealType.(*godwarf.ChanType) 1622 if !ok { 1623 v.Unreadable = errors.New("bad channel type") 1624 return 1625 } 1626 sv := v.clone() 1627 sv.RealType = resolveTypedef(&(chanType.TypedefType)) 1628 sv = sv.maybeDereference() 1629 if sv.Unreadable != nil || sv.Addr == 0 { 1630 return 1631 } 1632 v.Base = sv.Addr 1633 structType, ok := sv.DwarfType.(*godwarf.StructType) 1634 if !ok { 1635 v.Unreadable = errors.New("bad channel type") 1636 return 1637 } 1638 1639 lenAddr, _ := sv.toField(structType.Field[1]) 1640 lenAddr.loadValue(loadSingleValue) 1641 if lenAddr.Unreadable != nil { 1642 v.Unreadable = fmt.Errorf("unreadable length: %v", lenAddr.Unreadable) 1643 return 1644 } 1645 chanLen, _ := constant.Uint64Val(lenAddr.Value) 1646 1647 newStructType := &godwarf.StructType{} 1648 *newStructType = *structType 1649 newStructType.Field = make([]*godwarf.StructField, len(structType.Field)) 1650 1651 for i := range structType.Field { 1652 field := &godwarf.StructField{} 1653 *field = *structType.Field[i] 1654 if field.Name == "buf" { 1655 field.Type = pointerTo(fakeArrayType(chanLen, chanType.ElemType), v.bi.Arch) 1656 } 1657 newStructType.Field[i] = field 1658 } 1659 1660 v.RealType = &godwarf.ChanType{ 1661 TypedefType: godwarf.TypedefType{ 1662 CommonType: chanType.TypedefType.CommonType, 1663 Type: pointerTo(newStructType, v.bi.Arch), 1664 }, 1665 ElemType: chanType.ElemType, 1666 } 1667 } 1668 1669 func (v *Variable) loadArrayValues(recurseLevel int, cfg LoadConfig) { 1670 if v.Unreadable != nil { 1671 return 1672 } 1673 if v.Len < 0 { 1674 //lint:ignore ST1005 backwards compatibility 1675 v.Unreadable = errors.New("Negative array length") 1676 return 1677 } 1678 if v.Base == 0 && v.Len > 0 { 1679 v.Unreadable = errors.New("non-zero length array with nil base") 1680 return 1681 } 1682 1683 count := v.Len 1684 // Cap number of elements 1685 if count > int64(cfg.MaxArrayValues) { 1686 count = int64(cfg.MaxArrayValues) 1687 } 1688 1689 if v.stride < maxArrayStridePrefetch { 1690 v.mem = cacheMemory(v.mem, v.Base, int(v.stride*count)) 1691 } 1692 1693 errcount := 0 1694 1695 mem := v.mem 1696 if v.Kind != reflect.Array { 1697 mem = DereferenceMemory(mem) 1698 } 1699 1700 for i := int64(0); i < count; i++ { 1701 fieldvar := v.newVariable("", uint64(int64(v.Base)+(i*v.stride)), v.fieldType, mem) 1702 fieldvar.loadValueInternal(recurseLevel+1, cfg) 1703 1704 if fieldvar.Unreadable != nil { 1705 errcount++ 1706 } 1707 1708 v.Children = append(v.Children, *fieldvar) 1709 if errcount > maxErrCount { 1710 break 1711 } 1712 } 1713 } 1714 1715 func (v *Variable) readComplex(size int64) { 1716 var fs int64 1717 switch size { 1718 case 8: 1719 fs = 4 1720 case 16: 1721 fs = 8 1722 default: 1723 v.Unreadable = fmt.Errorf("invalid size (%d) for complex type", size) 1724 return 1725 } 1726 1727 ftyp := fakeBasicType("float", int(fs*8)) 1728 1729 realvar := v.newVariable("real", v.Addr, ftyp, v.mem) 1730 imagvar := v.newVariable("imaginary", v.Addr+uint64(fs), ftyp, v.mem) 1731 realvar.loadValue(loadSingleValue) 1732 imagvar.loadValue(loadSingleValue) 1733 v.Value = constant.BinaryOp(realvar.Value, token.ADD, constant.MakeImag(imagvar.Value)) 1734 } 1735 1736 func (v *Variable) writeComplex(real, imag float64, size int64) error { 1737 err := v.writeFloatRaw(real, int64(size/2)) 1738 if err != nil { 1739 return err 1740 } 1741 imagaddr := *v 1742 imagaddr.Addr += uint64(size / 2) 1743 return imagaddr.writeFloatRaw(imag, int64(size/2)) 1744 } 1745 
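// The raw readers and writers below encode and decode little-endian values of
// 1, 2, 4 or 8 bytes in target memory. A worked example (illustrative values
// only): for size 1 the byte 0xFF is reinterpreted as int8 before widening,
// so readIntRaw reports -1 while readUintRaw reports 255; for size 4 the
// bytes {0x78, 0x56, 0x34, 0x12} decode to 0x12345678.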
1746 func readIntRaw(mem MemoryReadWriter, addr uint64, size int64) (int64, error) { 1747 var n int64 1748 1749 val := make([]byte, int(size)) 1750 _, err := mem.ReadMemory(val, addr) 1751 if err != nil { 1752 return 0, err 1753 } 1754 1755 switch size { 1756 case 1: 1757 n = int64(int8(val[0])) 1758 case 2: 1759 n = int64(int16(binary.LittleEndian.Uint16(val))) 1760 case 4: 1761 n = int64(int32(binary.LittleEndian.Uint32(val))) 1762 case 8: 1763 n = int64(binary.LittleEndian.Uint64(val)) 1764 } 1765 1766 return n, nil 1767 } 1768 1769 func (v *Variable) writeUint(value uint64, size int64) error { 1770 val := make([]byte, size) 1771 1772 switch size { 1773 case 1: 1774 val[0] = byte(value) 1775 case 2: 1776 binary.LittleEndian.PutUint16(val, uint16(value)) 1777 case 4: 1778 binary.LittleEndian.PutUint32(val, uint32(value)) 1779 case 8: 1780 binary.LittleEndian.PutUint64(val, uint64(value)) 1781 } 1782 1783 _, err := v.mem.WriteMemory(v.Addr, val) 1784 return err 1785 } 1786 1787 func readUintRaw(mem MemoryReadWriter, addr uint64, size int64) (uint64, error) { 1788 var n uint64 1789 1790 val := make([]byte, int(size)) 1791 _, err := mem.ReadMemory(val, addr) 1792 if err != nil { 1793 return 0, err 1794 } 1795 1796 switch size { 1797 case 1: 1798 n = uint64(val[0]) 1799 case 2: 1800 n = uint64(binary.LittleEndian.Uint16(val)) 1801 case 4: 1802 n = uint64(binary.LittleEndian.Uint32(val)) 1803 case 8: 1804 n = uint64(binary.LittleEndian.Uint64(val)) 1805 } 1806 1807 return n, nil 1808 } 1809 1810 func (v *Variable) readFloatRaw(size int64) (float64, error) { 1811 val := make([]byte, int(size)) 1812 _, err := v.mem.ReadMemory(val, v.Addr) 1813 if err != nil { 1814 return 0.0, err 1815 } 1816 buf := bytes.NewBuffer(val) 1817 1818 switch size { 1819 case 4: 1820 n := float32(0) 1821 binary.Read(buf, binary.LittleEndian, &n) 1822 return float64(n), nil 1823 case 8: 1824 n := float64(0) 1825 binary.Read(buf, binary.LittleEndian, &n) 1826 return n, nil 1827 } 1828 1829 return 0.0, fmt.Errorf("could not read float") 1830 } 1831 1832 func (v *Variable) writeFloatRaw(f float64, size int64) error { 1833 buf := bytes.NewBuffer(make([]byte, 0, size)) 1834 1835 switch size { 1836 case 4: 1837 n := float32(f) 1838 binary.Write(buf, binary.LittleEndian, n) 1839 case 8: 1840 n := float64(f) 1841 binary.Write(buf, binary.LittleEndian, n) 1842 } 1843 1844 _, err := v.mem.WriteMemory(v.Addr, buf.Bytes()) 1845 return err 1846 } 1847 1848 func (v *Variable) writeBool(value bool) error { 1849 val := []byte{0} 1850 val[0] = *(*byte)(unsafe.Pointer(&value)) 1851 _, err := v.mem.WriteMemory(v.Addr, val) 1852 return err 1853 } 1854 1855 func (v *Variable) writeZero() error { 1856 val := make([]byte, v.RealType.Size()) 1857 _, err := v.mem.WriteMemory(v.Addr, val) 1858 return err 1859 } 1860 1861 // writeEmptyInterface writes the empty interface of type typeAddr and data as the data field. 
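//
// Sketch of the layout assumed here (illustrative, mirroring the runtime's
// two-word empty interface):
//
//	type eface struct {
//		_type unsafe.Pointer // address of the runtime type descriptor
//		data  unsafe.Pointer // pointer to the value
//	}
//
// readInterface yields one variable per word; this method overwrites the
// first word with typeAddr and copies data over the second.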
1862 func (v *Variable) writeEmptyInterface(typeAddr uint64, data *Variable) error { 1863 dstType, dstData, _ := v.readInterface() 1864 if v.Unreadable != nil { 1865 return v.Unreadable 1866 } 1867 dstType.writeUint(typeAddr, dstType.RealType.Size()) 1868 dstData.writeCopy(data) 1869 return nil 1870 } 1871 1872 func (v *Variable) writeSlice(len, cap int64, base uint64) error { 1873 for _, f := range v.RealType.(*godwarf.SliceType).Field { 1874 switch f.Name { 1875 case sliceArrayFieldName: 1876 arrv, _ := v.toField(f) 1877 if err := arrv.writeUint(uint64(base), arrv.RealType.Size()); err != nil { 1878 return err 1879 } 1880 case sliceLenFieldName: 1881 lenv, _ := v.toField(f) 1882 if err := lenv.writeUint(uint64(len), lenv.RealType.Size()); err != nil { 1883 return err 1884 } 1885 case sliceCapFieldName: 1886 capv, _ := v.toField(f) 1887 if err := capv.writeUint(uint64(cap), capv.RealType.Size()); err != nil { 1888 return err 1889 } 1890 } 1891 } 1892 return nil 1893 } 1894 1895 func (v *Variable) writeString(len, base uint64) error { 1896 writePointer(v.bi, v.mem, uint64(v.Addr), base) 1897 writePointer(v.bi, v.mem, uint64(v.Addr)+uint64(v.bi.Arch.PtrSize()), len) 1898 return nil 1899 } 1900 1901 func (v *Variable) writeCopy(srcv *Variable) error { 1902 buf := make([]byte, srcv.RealType.Size()) 1903 _, err := srcv.mem.ReadMemory(buf, srcv.Addr) 1904 if err != nil { 1905 return err 1906 } 1907 _, err = v.mem.WriteMemory(v.Addr, buf) 1908 return err 1909 } 1910 1911 func (v *Variable) readFunctionPtr() { 1912 // dereference pointer to find function pc 1913 v.closureAddr = v.funcvalAddr() 1914 if v.Unreadable != nil { 1915 return 1916 } 1917 if v.closureAddr == 0 { 1918 v.Base = 0 1919 v.Value = constant.MakeString("") 1920 return 1921 } 1922 1923 val, err := readUintRaw(v.mem, v.closureAddr, int64(v.bi.Arch.PtrSize())) 1924 if err != nil { 1925 v.Unreadable = err 1926 return 1927 } 1928 1929 v.Base = val 1930 fn := v.bi.PCToFunc(uint64(v.Base)) 1931 if fn == nil { 1932 v.Unreadable = fmt.Errorf("could not find function for %#v", v.Base) 1933 return 1934 } 1935 1936 v.Value = constant.MakeString(fn.Name) 1937 } 1938 1939 // funcvalAddr reads the address of the funcval contained in a function variable. 
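// A function variable holds a pointer to a runtime.funcval, whose first word
// is the entry PC of the function; readFunctionPtr above dereferences that
// word and resolves it to a *Function via PCToFunc.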
1940 func (v *Variable) funcvalAddr() uint64 { 1941 val, err := readUintRaw(v.mem, v.Addr, int64(v.bi.Arch.PtrSize())) 1942 if err != nil { 1943 v.Unreadable = err 1944 return 0 1945 } 1946 return val 1947 } 1948 1949 func (v *Variable) loadMap(recurseLevel int, cfg LoadConfig) { 1950 it := v.mapIterator() 1951 if it == nil { 1952 return 1953 } 1954 it.maxNumBuckets = uint64(cfg.MaxMapBuckets) 1955 1956 if v.Len == 0 || int64(v.mapSkip) >= v.Len || cfg.MaxArrayValues == 0 { 1957 return 1958 } 1959 1960 for skip := 0; skip < v.mapSkip; skip++ { 1961 if ok := it.next(); !ok { 1962 v.Unreadable = fmt.Errorf("map index out of bounds") 1963 return 1964 } 1965 } 1966 1967 count := 0 1968 errcount := 0 1969 for it.next() { 1970 key := it.key() 1971 var val *Variable 1972 if it.values.fieldType.Size() > 0 { 1973 val = it.value() 1974 } else { 1975 val = v.newVariable("", it.values.Addr, it.values.fieldType, DereferenceMemory(v.mem)) 1976 } 1977 key.loadValueInternal(recurseLevel+1, cfg) 1978 val.loadValueInternal(recurseLevel+1, cfg) 1979 if key.Unreadable != nil || val.Unreadable != nil { 1980 errcount++ 1981 } 1982 v.Children = append(v.Children, *key, *val) 1983 count++ 1984 if errcount > maxErrCount { 1985 break 1986 } 1987 if count >= cfg.MaxArrayValues || int64(count) >= v.Len { 1988 break 1989 } 1990 } 1991 } 1992 1993 type mapIterator struct { 1994 v *Variable 1995 numbuckets uint64 1996 oldmask uint64 1997 buckets *Variable 1998 oldbuckets *Variable 1999 b *Variable 2000 bidx uint64 2001 2002 tophashes *Variable 2003 keys *Variable 2004 values *Variable 2005 overflow *Variable 2006 2007 maxNumBuckets uint64 // maximum number of buckets to scan 2008 2009 idx int64 2010 2011 hashTophashEmptyOne uint64 // Go 1.12 and later has two sentinel tophash values for an empty cell, this is the second one (the first one hashTophashEmptyZero, the same as Go 1.11 and earlier) 2012 hashMinTopHash uint64 // minimum value of tophash for a cell that isn't either evacuated or empty 2013 } 2014 2015 // Code derived from go/src/runtime/hashmap.go 2016 func (v *Variable) mapIterator() *mapIterator { 2017 sv := v.clone() 2018 sv.RealType = resolveTypedef(&(sv.RealType.(*godwarf.MapType).TypedefType)) 2019 sv = sv.maybeDereference() 2020 v.Base = sv.Addr 2021 2022 maptype, ok := sv.RealType.(*godwarf.StructType) 2023 if !ok { 2024 v.Unreadable = fmt.Errorf("wrong real type for map") 2025 return nil 2026 } 2027 2028 it := &mapIterator{v: v, bidx: 0, b: nil, idx: 0} 2029 2030 if sv.Addr == 0 { 2031 it.numbuckets = 0 2032 return it 2033 } 2034 2035 v.mem = cacheMemory(v.mem, v.Base, int(v.RealType.Size())) 2036 2037 for _, f := range maptype.Field { 2038 var err error 2039 field, _ := sv.toField(f) 2040 switch f.Name { 2041 case "count": // +rtype -fieldof hmap int 2042 v.Len, err = field.asInt() 2043 case "B": // +rtype -fieldof hmap uint8 2044 var b uint64 2045 b, err = field.asUint() 2046 it.numbuckets = 1 << b 2047 it.oldmask = (1 << (b - 1)) - 1 2048 case "buckets": // +rtype -fieldof hmap unsafe.Pointer 2049 it.buckets = field.maybeDereference() 2050 case "oldbuckets": // +rtype -fieldof hmap unsafe.Pointer 2051 it.oldbuckets = field.maybeDereference() 2052 } 2053 if err != nil { 2054 v.Unreadable = err 2055 return nil 2056 } 2057 } 2058 2059 if it.buckets.Kind != reflect.Struct || it.oldbuckets.Kind != reflect.Struct { 2060 v.Unreadable = errMapBucketsNotStruct 2061 return nil 2062 } 2063 2064 it.hashTophashEmptyOne = hashTophashEmptyZero 2065 it.hashMinTopHash = hashMinTopHashGo111 2066 if producer := 
v.bi.Producer(); producer != "" && goversion.ProducerAfterOrEqual(producer, 1, 12) { 2067 it.hashTophashEmptyOne = hashTophashEmptyOne 2068 it.hashMinTopHash = hashMinTopHashGo112 2069 } 2070 2071 return it 2072 } 2073 2074 var errMapBucketContentsNotArray = errors.New("malformed map type: keys, values or tophash of a bucket is not an array") 2075 var errMapBucketContentsInconsistentLen = errors.New("malformed map type: inconsistent array length in bucket") 2076 var errMapBucketsNotStruct = errors.New("malformed map type: buckets, oldbuckets or overflow field not a struct") 2077 2078 func (it *mapIterator) nextBucket() bool { 2079 if it.overflow != nil && it.overflow.Addr > 0 { 2080 it.b = it.overflow 2081 } else { 2082 it.b = nil 2083 2084 if it.maxNumBuckets > 0 && it.bidx >= it.maxNumBuckets { 2085 return false 2086 } 2087 2088 for it.bidx < it.numbuckets { 2089 it.b = it.buckets.clone() 2090 it.b.Addr += uint64(it.buckets.DwarfType.Size()) * it.bidx 2091 2092 if it.oldbuckets.Addr <= 0 { 2093 break 2094 } 2095 2096 // if oldbuckets is not nil we are iterating through a map that is in 2097 // the middle of a grow. 2098 // if the bucket we are looking at hasn't been filled in we iterate 2099 // instead through its corresponding "oldbucket" (i.e. the bucket the 2100 // elements of this bucket are coming from) but only if this is the first 2101 // of the two buckets being created from the same oldbucket (otherwise we 2102 // would print some keys twice) 2103 2104 oldbidx := it.bidx & it.oldmask 2105 oldb := it.oldbuckets.clone() 2106 oldb.Addr += uint64(it.oldbuckets.DwarfType.Size()) * oldbidx 2107 2108 if it.mapEvacuated(oldb) { 2109 break 2110 } 2111 2112 if oldbidx == it.bidx { 2113 it.b = oldb 2114 break 2115 } 2116 2117 // oldbucket origin for current bucket has not been evacuated but we have already 2118 // iterated over it so we should just skip it 2119 it.b = nil 2120 it.bidx++ 2121 } 2122 2123 if it.b == nil { 2124 return false 2125 } 2126 it.bidx++ 2127 } 2128 2129 if it.b.Addr <= 0 { 2130 return false 2131 } 2132 2133 it.b.mem = cacheMemory(it.b.mem, it.b.Addr, int(it.b.RealType.Size())) 2134 2135 it.tophashes = nil 2136 it.keys = nil 2137 it.values = nil 2138 it.overflow = nil 2139 2140 for _, f := range it.b.DwarfType.(*godwarf.StructType).Field { 2141 field, err := it.b.toField(f) 2142 if err != nil { 2143 it.v.Unreadable = err 2144 return false 2145 } 2146 if field.Unreadable != nil { 2147 it.v.Unreadable = field.Unreadable 2148 return false 2149 } 2150 2151 switch f.Name { 2152 case "tophash": // +rtype -fieldof bmap [8]uint8 2153 it.tophashes = field 2154 case "keys": 2155 it.keys = field 2156 case "values": 2157 it.values = field 2158 case "overflow": 2159 it.overflow = field.maybeDereference() 2160 } 2161 } 2162 2163 // sanity checks 2164 if it.tophashes == nil || it.keys == nil || it.values == nil { 2165 it.v.Unreadable = fmt.Errorf("malformed map type") 2166 return false 2167 } 2168 2169 if it.tophashes.Kind != reflect.Array || it.keys.Kind != reflect.Array || it.values.Kind != reflect.Array { 2170 it.v.Unreadable = errMapBucketContentsNotArray 2171 return false 2172 } 2173 2174 if it.tophashes.Len != it.keys.Len { 2175 it.v.Unreadable = errMapBucketContentsInconsistentLen 2176 return false 2177 } 2178 2179 if it.values.fieldType.Size() > 0 && it.tophashes.Len != it.values.Len { 2180 // if the type of the value is zero-sized (i.e. struct{}) then the values 2181 // array's length is zero. 
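// Hence the length consistency check is only enforced for value types with
// non-zero size.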
2182 it.v.Unreadable = errMapBucketContentsInconsistentLen 2183 return false 2184 } 2185 2186 if it.overflow.Kind != reflect.Struct { 2187 it.v.Unreadable = errMapBucketsNotStruct 2188 return false 2189 } 2190 2191 return true 2192 } 2193 2194 func (it *mapIterator) next() bool { 2195 for { 2196 if it.b == nil || it.idx >= it.tophashes.Len { 2197 r := it.nextBucket() 2198 if !r { 2199 return false 2200 } 2201 it.idx = 0 2202 } 2203 tophash, _ := it.tophashes.sliceAccess(int(it.idx)) 2204 h, err := tophash.asUint() 2205 if err != nil { 2206 it.v.Unreadable = fmt.Errorf("unreadable tophash: %v", err) 2207 return false 2208 } 2209 it.idx++ 2210 if h != hashTophashEmptyZero && h != it.hashTophashEmptyOne { 2211 return true 2212 } 2213 } 2214 } 2215 2216 func (it *mapIterator) key() *Variable { 2217 k, _ := it.keys.sliceAccess(int(it.idx - 1)) 2218 return k 2219 } 2220 2221 func (it *mapIterator) value() *Variable { 2222 v, _ := it.values.sliceAccess(int(it.idx - 1)) 2223 return v 2224 } 2225 2226 func (it *mapIterator) mapEvacuated(b *Variable) bool { 2227 if b.Addr == 0 { 2228 return true 2229 } 2230 for _, f := range b.DwarfType.(*godwarf.StructType).Field { 2231 if f.Name != "tophash" { 2232 continue 2233 } 2234 tophashes, _ := b.toField(f) 2235 tophash0var, _ := tophashes.sliceAccess(0) 2236 tophash0, err := tophash0var.asUint() 2237 if err != nil { 2238 return true 2239 } 2240 //TODO: this needs to be > hashTophashEmptyOne for go >= 1.12 2241 return tophash0 > it.hashTophashEmptyOne && tophash0 < it.hashMinTopHash 2242 } 2243 return true 2244 } 2245 2246 func (v *Variable) readInterface() (_type, data *Variable, isnil bool) { 2247 // An interface variable is implemented either by a runtime.iface 2248 // struct or a runtime.eface struct. The difference being that empty 2249 // interfaces (i.e. "interface {}") are represented by runtime.eface 2250 // and non-empty interfaces by runtime.iface. 2251 // 2252 // For both runtime.ifaces and runtime.efaces the data is stored in v.data 2253 // 2254 // The concrete type however is stored in v.tab._type for non-empty 2255 // interfaces and in v._type for empty interfaces. 2256 // 2257 // For nil empty interface variables _type will be nil, for nil 2258 // non-empty interface variables tab will be nil 2259 // 2260 // In either case the _type field is a pointer to a runtime._type struct. 2261 // 2262 // The following code works for both runtime.iface and runtime.eface. 
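// For reference, the two runtime representations look roughly like this
// (exact definitions vary between Go versions):
//
//	type iface struct {
//		tab  *itab          // describes the concrete type stored in the interface
//		data unsafe.Pointer // pointer to (or inline copy of) the concrete value
//	}
//
//	type eface struct {
//		_type *_type         // concrete type descriptor
//		data  unsafe.Pointer
//	}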
2263 2264 v.mem = cacheMemory(v.mem, v.Addr, int(v.RealType.Size())) 2265 2266 ityp := resolveTypedef(&v.RealType.(*godwarf.InterfaceType).TypedefType).(*godwarf.StructType) 2267 2268 // +rtype -field iface.tab *itab 2269 // +rtype -field iface.data unsafe.Pointer 2270 // +rtype -field eface._type *_type|*internal/abi.Type 2271 // +rtype -field eface.data unsafe.Pointer 2272 2273 for _, f := range ityp.Field { 2274 switch f.Name { 2275 case "tab": // for runtime.iface 2276 tab, _ := v.toField(f) // +rtype *itab 2277 tab = tab.maybeDereference() 2278 isnil = tab.Addr == 0 2279 if !isnil { 2280 var err error 2281 _type, err = tab.structMember("_type") // +rtype *_type|*internal/abi.Type 2282 if err != nil { 2283 v.Unreadable = fmt.Errorf("invalid interface type: %v", err) 2284 return 2285 } 2286 } 2287 case "_type": // for runtime.eface 2288 _type, _ = v.toField(f) 2289 isnil = _type.maybeDereference().Addr == 0 2290 case "data": 2291 data, _ = v.toField(f) 2292 } 2293 } 2294 return 2295 } 2296 2297 func (v *Variable) loadInterface(recurseLevel int, loadData bool, cfg LoadConfig) { 2298 _type, data, isnil := v.readInterface() 2299 2300 if isnil { 2301 // interface to nil 2302 data = data.maybeDereference() 2303 v.Children = []Variable{*data} 2304 if loadData { 2305 v.Children[0].loadValueInternal(recurseLevel, cfg) 2306 } 2307 return 2308 } 2309 2310 if data == nil { 2311 v.Unreadable = fmt.Errorf("invalid interface type") 2312 return 2313 } 2314 2315 typ, kind, err := runtimeTypeToDIE(_type, data.Addr) 2316 if err != nil { 2317 v.Unreadable = err 2318 return 2319 } 2320 2321 deref := false 2322 if kind&kindDirectIface == 0 { 2323 realtyp := resolveTypedef(typ) 2324 if _, isptr := realtyp.(*godwarf.PtrType); !isptr { 2325 typ = pointerTo(typ, v.bi.Arch) 2326 deref = true 2327 } 2328 } 2329 2330 data = data.newVariable("data", data.Addr, typ, data.mem) 2331 if deref { 2332 data = data.maybeDereference() 2333 data.Name = "data" 2334 } 2335 2336 v.Children = []Variable{*data} 2337 if loadData && recurseLevel <= cfg.MaxVariableRecurse { 2338 v.Children[0].loadValueInternal(recurseLevel, cfg) 2339 } else { 2340 v.Children[0].OnlyAddr = true 2341 } 2342 } 2343 2344 // ConstDescr describes the value of v using constants. 2345 func (v *Variable) ConstDescr() string { 2346 if v.bi == nil || (v.Flags&VariableConstant != 0) { 2347 return "" 2348 } 2349 ctyp := v.bi.consts.Get(v.DwarfType) 2350 if ctyp == nil { 2351 return "" 2352 } 2353 if typename := v.DwarfType.Common().Name; !strings.Contains(typename, ".") || strings.HasPrefix(typename, "C.") { 2354 // only attempt to use constants for user defined type, otherwise every 2355 // int variable with value 1 will be described with io.SeekCurrent and other 2356 // similar problems. 
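// Predefined types have no package qualifier in their name and cgo types are
// prefixed with "C.", so both are excluded by the check above.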
2357 return "" 2358 } 2359 2360 switch v.Kind { 2361 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 2362 fallthrough 2363 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 2364 n, _ := constant.Int64Val(v.Value) 2365 return ctyp.describe(n) 2366 } 2367 return "" 2368 } 2369 2370 // registerVariableTypeConv implements type conversions for CPU register variables (REGNAME.int8, etc) 2371 func (v *Variable) registerVariableTypeConv(newtyp string) (*Variable, error) { 2372 var n int = 0 2373 for i := 0; i < len(v.reg.Bytes); i += n { 2374 var child *Variable 2375 switch newtyp { 2376 case "int8": 2377 child = newConstant(constant.MakeInt64(int64(int8(v.reg.Bytes[i]))), v.mem) 2378 child.Kind = reflect.Int8 2379 n = 1 2380 case "int16": 2381 child = newConstant(constant.MakeInt64(int64(int16(binary.LittleEndian.Uint16(v.reg.Bytes[i:])))), v.mem) 2382 child.Kind = reflect.Int16 2383 n = 2 2384 case "int32": 2385 child = newConstant(constant.MakeInt64(int64(int32(binary.LittleEndian.Uint32(v.reg.Bytes[i:])))), v.mem) 2386 child.Kind = reflect.Int32 2387 n = 4 2388 case "int64": 2389 child = newConstant(constant.MakeInt64(int64(binary.LittleEndian.Uint64(v.reg.Bytes[i:]))), v.mem) 2390 child.Kind = reflect.Int64 2391 n = 8 2392 case "uint8": 2393 child = newConstant(constant.MakeUint64(uint64(v.reg.Bytes[i])), v.mem) 2394 child.Kind = reflect.Uint8 2395 n = 1 2396 case "uint16": 2397 child = newConstant(constant.MakeUint64(uint64(binary.LittleEndian.Uint16(v.reg.Bytes[i:]))), v.mem) 2398 child.Kind = reflect.Uint16 2399 n = 2 2400 case "uint32": 2401 child = newConstant(constant.MakeUint64(uint64(binary.LittleEndian.Uint32(v.reg.Bytes[i:]))), v.mem) 2402 child.Kind = reflect.Uint32 2403 n = 4 2404 case "uint64": 2405 child = newConstant(constant.MakeUint64(uint64(binary.LittleEndian.Uint64(v.reg.Bytes[i:]))), v.mem) 2406 child.Kind = reflect.Uint64 2407 n = 8 2408 case "float32": 2409 a := binary.LittleEndian.Uint32(v.reg.Bytes[i:]) 2410 x := *(*float32)(unsafe.Pointer(&a)) 2411 child = newConstant(constant.MakeFloat64(float64(x)), v.mem) 2412 child.Kind = reflect.Float32 2413 n = 4 2414 case "float64": 2415 a := binary.LittleEndian.Uint64(v.reg.Bytes[i:]) 2416 x := *(*float64)(unsafe.Pointer(&a)) 2417 child = newConstant(constant.MakeFloat64(x), v.mem) 2418 child.Kind = reflect.Float64 2419 n = 8 2420 default: 2421 if n == 0 { 2422 for _, pfx := range []string{"uint", "int"} { 2423 if strings.HasPrefix(newtyp, pfx) { 2424 n, _ = strconv.Atoi(newtyp[len(pfx):]) 2425 break 2426 } 2427 } 2428 if n == 0 || popcnt(uint64(n)) != 1 { 2429 return nil, fmt.Errorf("unknown CPU register type conversion to %q", newtyp) 2430 } 2431 n = n / 8 2432 } 2433 child = newConstant(constant.MakeString(fmt.Sprintf("%x", v.reg.Bytes[i:][:n])), v.mem) 2434 } 2435 v.Children = append(v.Children, *child) 2436 } 2437 2438 v.loaded = true 2439 v.Kind = reflect.Array 2440 v.Len = int64(len(v.Children)) 2441 v.Base = fakeAddressUnresolv 2442 v.DwarfType = fakeArrayType(uint64(len(v.Children)), &godwarf.VoidType{CommonType: godwarf.CommonType{ByteSize: int64(n)}}) 2443 v.RealType = v.DwarfType 2444 return v, nil 2445 } 2446 2447 // popcnt is the number of bits set to 1 in x. 2448 // It's the same as math/bits.OnesCount64, copied here so that we can build 2449 // on versions of go that don't have math/bits. 2450 func popcnt(x uint64) int { 2451 const m0 = 0x5555555555555555 // 01010101 ... 2452 const m1 = 0x3333333333333333 // 00110011 ... 
2453 const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... 2454 const m = 1<<64 - 1 2455 x = x>>1&(m0&m) + x&(m0&m) 2456 x = x>>2&(m1&m) + x&(m1&m) 2457 x = (x>>4 + x) & (m2 & m) 2458 x += x >> 8 2459 x += x >> 16 2460 x += x >> 32 2461 return int(x) & (1<<7 - 1) 2462 } 2463 2464 func isCgoType(bi *BinaryInfo, typ godwarf.Type) bool { 2465 cu := bi.Images[typ.Common().Index].findCompileUnitForOffset(typ.Common().Offset) 2466 if cu == nil { 2467 return false 2468 } 2469 return !cu.isgo 2470 } 2471 2472 func isCgoCharPtr(bi *BinaryInfo, typ *godwarf.PtrType) bool { 2473 if !isCgoType(bi, typ) { 2474 return false 2475 } 2476 2477 fieldtyp := typ.Type 2478 resolveQualTypedef: 2479 for { 2480 switch t := fieldtyp.(type) { 2481 case *godwarf.QualType: 2482 fieldtyp = t.Type 2483 case *godwarf.TypedefType: 2484 fieldtyp = t.Type 2485 default: 2486 break resolveQualTypedef 2487 } 2488 } 2489 2490 _, ischar := fieldtyp.(*godwarf.CharType) 2491 _, isuchar := fieldtyp.(*godwarf.UcharType) 2492 return ischar || isuchar 2493 } 2494 2495 func (cm constantsMap) Get(typ godwarf.Type) *constantType { 2496 ctyp := cm[dwarfRef{typ.Common().Index, typ.Common().Offset}] 2497 if ctyp == nil { 2498 return nil 2499 } 2500 typepkg := packageName(typ.String()) + "." 2501 if !ctyp.initialized { 2502 ctyp.initialized = true 2503 sort.Sort(constantValuesByValue(ctyp.values)) 2504 for i := range ctyp.values { 2505 ctyp.values[i].name = strings.TrimPrefix(ctyp.values[i].name, typepkg) 2506 if popcnt(uint64(ctyp.values[i].value)) == 1 { 2507 ctyp.values[i].singleBit = true 2508 } 2509 } 2510 } 2511 return ctyp 2512 } 2513 2514 func (ctyp *constantType) describe(n int64) string { 2515 for _, val := range ctyp.values { 2516 if val.value == n { 2517 return val.name 2518 } 2519 } 2520 2521 if n == 0 { 2522 return "" 2523 } 2524 2525 // If all the values for this constant only have one bit set we try to 2526 // represent the value as a bitwise or of constants. 
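// For example, with single-bit constants A=1, B=2 and C=4 (and no constant
// exactly equal to 5), a value of 5 is described as "A|C".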
2527 2528 fields := []string{} 2529 for _, val := range ctyp.values { 2530 if !val.singleBit { 2531 continue 2532 } 2533 if n&val.value != 0 { 2534 fields = append(fields, val.name) 2535 n = n & ^val.value 2536 } 2537 } 2538 if n == 0 { 2539 return strings.Join(fields, "|") 2540 } 2541 return "" 2542 } 2543 2544 type variablesByDepthAndDeclLine struct { 2545 vars []*Variable 2546 depths []int 2547 } 2548 2549 func (v *variablesByDepthAndDeclLine) Len() int { return len(v.vars) } 2550 2551 func (v *variablesByDepthAndDeclLine) Less(i int, j int) bool { 2552 if v.depths[i] == v.depths[j] { 2553 return v.vars[i].DeclLine < v.vars[j].DeclLine 2554 } 2555 return v.depths[i] < v.depths[j] 2556 } 2557 2558 func (v *variablesByDepthAndDeclLine) Swap(i int, j int) { 2559 v.depths[i], v.depths[j] = v.depths[j], v.depths[i] 2560 v.vars[i], v.vars[j] = v.vars[j], v.vars[i] 2561 } 2562 2563 type constantValuesByValue []constantValue 2564 2565 func (v constantValuesByValue) Len() int { return len(v) } 2566 func (v constantValuesByValue) Less(i int, j int) bool { return v[i].value < v[j].value } 2567 func (v constantValuesByValue) Swap(i int, j int) { v[i], v[j] = v[j], v[i] } 2568 2569 const ( 2570 timeTimeWallHasMonotonicBit uint64 = (1 << 63) // hasMonotonic bit of time.Time.wall 2571 2572 //lint:ignore ST1011 addSeconds is the name of the relevant function 2573 maxAddSeconds time.Duration = (time.Duration(^uint64(0)>>1) / time.Second) * time.Second // maximum number of seconds that can be added with (time.Time).Add, measured in nanoseconds 2574 2575 wallNsecShift = 30 // size of the nanoseconds field of time.Time.wall 2576 2577 unixTimestampOfWallEpoch = -2682288000 // number of seconds between the unix epoch and the epoch for time.Time.wall (1 jan 1885) 2578 ) 2579 2580 // formatTime writes formatted value of a time.Time to v.Value. 2581 // See $GOROOT/src/time/time.go for a description of time.Time internals. 
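// In short: when the hasMonotonic bit of wall is set, wall packs a 33-bit
// count of seconds since Jan 1 1885 and a 30-bit nanoseconds field, while ext
// holds a monotonic clock reading; otherwise ext holds the full signed number
// of seconds since Jan 1 year 1.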
2582 func (v *Variable) formatTime() { 2583 wallv := v.fieldVariable("wall") 2584 extv := v.fieldVariable("ext") 2585 if wallv == nil || extv == nil || wallv.Unreadable != nil || extv.Unreadable != nil || wallv.Value == nil || extv.Value == nil { 2586 return 2587 } 2588 2589 var loc *time.Location 2590 2591 locv := v.fieldVariable("loc") 2592 if locv != nil && locv.Unreadable == nil { 2593 namev := locv.loadFieldNamed("name") 2594 if namev != nil && namev.Unreadable == nil { 2595 name := constant.StringVal(namev.Value) 2596 loc, _ = time.LoadLocation(name) 2597 } 2598 } 2599 2600 wall, _ := constant.Uint64Val(wallv.Value) 2601 ext, _ := constant.Int64Val(extv.Value) 2602 2603 hasMonotonic := (wall & timeTimeWallHasMonotonicBit) != 0 2604 if hasMonotonic { 2605 // the 33-bit field of wall holds a 33-bit unsigned wall 2606 // seconds since Jan 1 year 1885, and ext holds a signed 64-bit monotonic 2607 // clock reading, nanoseconds since process start 2608 sec := int64(wall << 1 >> (wallNsecShift + 1)) // seconds since 1 Jan 1885 2609 t := time.Unix(sec+unixTimestampOfWallEpoch, 0).UTC() 2610 if loc != nil { 2611 t = t.In(loc) 2612 } 2613 v.Value = constant.MakeString(fmt.Sprintf("%s, %+d", t.Format(time.RFC3339), ext)) 2614 } else { 2615 // the full signed 64-bit wall seconds since Jan 1 year 1 is stored in ext 2616 var t time.Time 2617 if ext > int64(maxAddSeconds/time.Second)*1000 { 2618 // avoid doing the add loop below if it will take too much time 2619 return 2620 } 2621 for ext > int64(maxAddSeconds/time.Second) { 2622 t = t.Add(maxAddSeconds) 2623 ext -= int64(maxAddSeconds / time.Second) 2624 } 2625 t = t.Add(time.Duration(ext) * time.Second) 2626 if loc != nil { 2627 t = t.In(loc) 2628 } 2629 v.Value = constant.MakeString(t.Format(time.RFC3339)) 2630 } 2631 }
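// The wall-word decoding performed by formatTime can be sketched in isolation.
// The constants are the ones defined above; wall is assumed to be the raw
// uint64 already read from time.Time.wall:
//
//	hasMonotonic := wall&timeTimeWallHasMonotonicBit != 0
//	if hasMonotonic {
//		// drop the flag bit and the 30-bit nanoseconds field, leaving the
//		// 33-bit count of seconds since Jan 1 1885
//		sec := int64(wall << 1 >> (wallNsecShift + 1))
//		t := time.Unix(sec+unixTimestampOfWallEpoch, 0).UTC()
//		_ = t // formatTime would then apply the location and RFC3339-format t
//	}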