github.com/undoio/delve@v1.9.0/pkg/proc/variables.go

package proc

import (
	"bytes"
	"debug/dwarf"
	"encoding/binary"
	"errors"
	"fmt"
	"go/constant"
	"go/token"
	"math"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"time"
	"unsafe"

	"github.com/undoio/delve/pkg/dwarf/godwarf"
	"github.com/undoio/delve/pkg/dwarf/op"
	"github.com/undoio/delve/pkg/goversion"
	"github.com/undoio/delve/pkg/logflags"
)

const (
	maxErrCount = 3 // Max number of read errors to accept while evaluating slices, arrays and structs

	maxArrayStridePrefetch = 1024 // Maximum size of array stride for which we will prefetch the array contents

	// hashTophashEmptyZero is used by map reading code, indicates an empty cell
	hashTophashEmptyZero = 0 // +rtype emptyRest
	// hashTophashEmptyOne is used by map reading code, indicates an empty cell in Go 1.12 and later
	hashTophashEmptyOne = 1 // +rtype emptyOne
	// hashMinTopHashGo111 is used by map reading code, indicates the minimum value of tophash that isn't empty or evacuated, in Go 1.11
	hashMinTopHashGo111 = 4 // +rtype minTopHash
	// hashMinTopHashGo112 is used by map reading code, indicates the minimum value of tophash that isn't empty or evacuated, in Go 1.12
	hashMinTopHashGo112 = 5 // +rtype minTopHash

	maxFramePrefetchSize = 1 * 1024 * 1024 // Maximum prefetch size for a stack frame

	maxMapBucketsFactor = 100 // Maximum number of map buckets to read for every requested map entry when loading variables through (*EvalScope).LocalVariables and (*EvalScope).FunctionArguments.

	maxGoroutineUserCurrentDepth = 30 // Maximum depth used by (*G).UserCurrent to search its location
)

type floatSpecial uint8

const (
	// FloatIsNormal means the value is a normal float.
	FloatIsNormal floatSpecial = iota
	// FloatIsNaN means the float is a special NaN value.
	FloatIsNaN
	// FloatIsPosInf means the float is a special positive infinity value.
	FloatIsPosInf
	// FloatIsNegInf means the float is a special negative infinity value.
	FloatIsNegInf
)

type variableFlags uint16

const (
	// VariableEscaped is set for local variables that escaped to the heap
	//
	// The compiler performs escape analysis on local variables, the variables
	// that may outlive the stack frame are allocated on the heap instead and
	// only the address is recorded on the stack. These variables will be
	// marked with this flag.
	VariableEscaped variableFlags = (1 << iota)
	// VariableShadowed is set for local variables that are shadowed by a
	// variable with the same name in another scope
	VariableShadowed
	// VariableConstant means this variable is a constant value
	VariableConstant
	// VariableArgument means this variable is a function argument
	VariableArgument
	// VariableReturnArgument means this variable is a function return value
	VariableReturnArgument
	// VariableFakeAddress means the address of this variable is either fake
	// (i.e. the variable is partially or completely stored in a CPU register
	// and doesn't have a real address) or possibly no longer available (because
	// the variable is the return value of a function call and allocated on a
	// frame that no longer exists)
	VariableFakeAddress
	// VariableCPtr means the variable is a C pointer
	VariableCPtr
	// VariableCPURegister means this variable is a CPU register.
	VariableCPURegister
)

// Variable represents a variable.
// It contains the address, name,
// type and other information parsed from both the Dwarf information
// and the memory of the debugged process.
// If OnlyAddr is true, the variable's value has not been loaded.
type Variable struct {
	Addr      uint64
	OnlyAddr  bool
	Name      string
	DwarfType godwarf.Type
	RealType  godwarf.Type
	Kind      reflect.Kind
	mem       MemoryReadWriter
	bi        *BinaryInfo

	Value        constant.Value
	FloatSpecial floatSpecial
	reg          *op.DwarfRegister // contains the value of this variable if VariableCPURegister flag is set and loaded is false

	Len int64
	Cap int64

	Flags variableFlags

	// Base address of arrays, Base address of the backing array for slices (0 for nil slices)
	// Base address of the backing byte array for strings
	// address of the struct backing chan and map variables
	// address of the function entry point for function variables (0 for nil function pointers)
	Base      uint64
	stride    int64
	fieldType godwarf.Type

	// closureAddr is the closure address for function variables (0 for non-closures)
	closureAddr uint64

	// number of elements to skip when loading a map
	mapSkip int

	Children []Variable

	loaded     bool
	Unreadable error

	LocationExpr *locationExpr // location expression
	DeclLine     int64         // line number of this variable's declaration
}

// LoadConfig controls how variables are loaded from the target's memory.
type LoadConfig struct {
	// FollowPointers requests pointers to be automatically dereferenced.
	FollowPointers bool
	// MaxVariableRecurse is how far to recurse when evaluating nested types.
	MaxVariableRecurse int
	// MaxStringLen is the maximum number of bytes read from a string
	MaxStringLen int
	// MaxArrayValues is the maximum number of elements read from an array, a slice or a map.
	MaxArrayValues int
	// MaxStructFields is the maximum number of fields read from a struct, -1 will read all fields.
	MaxStructFields int

	// MaxMapBuckets is the maximum number of map buckets to read before giving up.
	// A value of 0 will read as many buckets as necessary until the entire map
	// is read or MaxArrayValues is reached.
	//
	// Loading a map is an operation that issues O(num_buckets) operations.
	// Normally the number of buckets is proportional to the number of elements
	// in the map, since the runtime tries to keep the load factor of maps
	// between 40% and 80%.
	//
	// It is possible, however, to create very sparse maps either by:
	// a) adding lots of entries to a map and then deleting most of them, or
	// b) using the make(mapType, N) expression with a very large N
	//
	// When this happens delve will have to scan many empty buckets to find the
	// few entries in the map.
	// MaxMapBuckets can be set to avoid annoying slowdowns while reading
	// very sparse maps.
	//
	// Since there is no good way for a user of delve to specify the value of
	// MaxMapBuckets, this field is not actually exposed through the API.
	// Instead (*EvalScope).LocalVariables and (*EvalScope).FunctionArguments
	// set this field automatically to MaxArrayValues * maxMapBucketsFactor.
	// Every other invocation uses the default value of 0, obtaining the old behavior.
172 // In practice this means that debuggers using the ListLocalVars or 173 // ListFunctionArgs API will not experience a massive slowdown when a very 174 // sparse map is in scope, but evaluating a single variable will still work 175 // correctly, even if the variable in question is a very sparse map. 176 MaxMapBuckets int 177 } 178 179 var loadSingleValue = LoadConfig{false, 0, 64, 0, 0, 0} 180 var loadFullValue = LoadConfig{true, 1, 64, 64, -1, 0} 181 var loadFullValueLongerStrings = LoadConfig{true, 1, 1024 * 1024, 64, -1, 0} 182 183 // G status, from: src/runtime/runtime2.go 184 const ( 185 Gidle uint64 = iota // 0 186 Grunnable // 1 runnable and on a run queue 187 Grunning // 2 188 Gsyscall // 3 189 Gwaiting // 4 190 GmoribundUnused // 5 currently unused, but hardcoded in gdb scripts 191 Gdead // 6 192 Genqueue // 7 Only the Gscanenqueue is used. 193 Gcopystack // 8 in this state when newstack is moving the stack 194 ) 195 196 // G represents a runtime G (goroutine) structure (at least the 197 // fields that Delve is interested in). 198 type G struct { 199 ID int // Goroutine ID 200 PC uint64 // PC of goroutine when it was parked. 201 SP uint64 // SP of goroutine when it was parked. 202 BP uint64 // BP of goroutine when it was parked (go >= 1.7). 203 LR uint64 // LR of goroutine when it was parked. 204 GoPC uint64 // PC of 'go' statement that created this goroutine. 205 StartPC uint64 // PC of the first function run on this goroutine. 206 Status uint64 207 stack stack // value of stack 208 209 WaitSince int64 210 WaitReason int64 211 212 SystemStack bool // SystemStack is true if this goroutine is currently executing on a system stack. 213 214 // Information on goroutine location 215 CurrentLoc Location 216 217 // Thread that this goroutine is currently allocated to 218 Thread Thread 219 220 variable *Variable 221 222 Unreadable error // could not read the G struct 223 224 labels *map[string]string // G's pprof labels, computed on demand in Labels() method 225 } 226 227 // stack represents a stack span in the target process. 228 type stack struct { 229 hi, lo uint64 230 } 231 232 // GetG returns information on the G (goroutine) that is executing on this thread. 233 // 234 // The G structure for a thread is stored in thread local storage. Here we simply 235 // calculate the address and read and parse the G struct. 236 // 237 // We cannot simply use the allg linked list in order to find the M that represents 238 // the given OS thread and follow its G pointer because on Darwin mach ports are not 239 // universal, so our port for this thread would not map to the `id` attribute of the M 240 // structure. Also, when linked against libc, Go prefers the libc version of clone as 241 // opposed to the runtime version. This has the consequence of not setting M.id for 242 // any thread, regardless of OS. 243 // 244 // In order to get around all this craziness, we read the address of the G structure for 245 // the current thread from the thread local storage area. 246 func GetG(thread Thread) (*G, error) { 247 if thread.Common().g != nil { 248 return thread.Common().g, nil 249 } 250 if loc, _ := thread.Location(); loc != nil && loc.Fn != nil && loc.Fn.Name == "runtime.clone" { 251 // When threads are executing runtime.clone the value of TLS is unreliable. 
252 return nil, nil 253 } 254 gaddr, err := getGVariable(thread) 255 if err != nil { 256 return nil, err 257 } 258 259 g, err := gaddr.parseG() 260 if err != nil { 261 return nil, err 262 } 263 if g.ID == 0 { 264 // The runtime uses a special goroutine with ID == 0 to mark that the 265 // current goroutine is executing on the system stack (sometimes also 266 // referred to as the g0 stack or scheduler stack, I'm not sure if there's 267 // actually any difference between those). 268 // For our purposes it's better if we always return the real goroutine 269 // since the rest of the code assumes the goroutine ID is univocal. 270 // The real 'current goroutine' is stored in g0.m.curg 271 mvar, err := g.variable.structMember("m") 272 if err != nil { 273 return nil, err 274 } 275 curgvar, err := mvar.structMember("curg") 276 if err != nil { 277 return nil, err 278 } 279 g, err = curgvar.parseG() 280 if err != nil { 281 if _, ok := err.(ErrNoGoroutine); ok { 282 err = ErrNoGoroutine{thread.ThreadID()} 283 } 284 return nil, err 285 } 286 g.SystemStack = true 287 } 288 g.Thread = thread 289 if loc, err := thread.Location(); err == nil { 290 g.CurrentLoc = *loc 291 } 292 thread.Common().g = g 293 return g, nil 294 } 295 296 // GoroutinesInfo searches for goroutines starting at index 'start', and 297 // returns an array of up to 'count' (or all found elements, if 'count' is 0) 298 // G structures representing the information Delve care about from the internal 299 // runtime G structure. 300 // GoroutinesInfo also returns the next index to be used as 'start' argument 301 // while scanning for all available goroutines, or -1 if there was an error 302 // or if the index already reached the last possible value. 303 func GoroutinesInfo(dbp *Target, start, count int) ([]*G, int, error) { 304 if _, err := dbp.Valid(); err != nil { 305 return nil, -1, err 306 } 307 if dbp.gcache.allGCache != nil { 308 // We can't use the cached array to fulfill a subrange request 309 if start == 0 && (count == 0 || count >= len(dbp.gcache.allGCache)) { 310 return dbp.gcache.allGCache, -1, nil 311 } 312 } 313 314 var ( 315 threadg = map[int]*G{} 316 allg []*G 317 ) 318 319 threads := dbp.ThreadList() 320 for _, th := range threads { 321 g, _ := GetG(th) 322 if g != nil { 323 threadg[g.ID] = g 324 } 325 } 326 327 allgptr, allglen, err := dbp.gcache.getRuntimeAllg(dbp.BinInfo(), dbp.Memory()) 328 if err != nil { 329 return nil, -1, err 330 } 331 332 for i := uint64(start); i < allglen; i++ { 333 if count != 0 && len(allg) >= count { 334 return allg, int(i), nil 335 } 336 gvar, err := newGVariable(dbp.CurrentThread(), allgptr+(i*uint64(dbp.BinInfo().Arch.PtrSize())), true) 337 if err != nil { 338 allg = append(allg, &G{Unreadable: err}) 339 continue 340 } 341 g, err := gvar.parseG() 342 if err != nil { 343 allg = append(allg, &G{Unreadable: err}) 344 continue 345 } 346 if thg, allocated := threadg[g.ID]; allocated { 347 loc, err := thg.Thread.Location() 348 if err != nil { 349 return nil, -1, err 350 } 351 g.Thread = thg.Thread 352 // Prefer actual thread location information. 353 g.CurrentLoc = *loc 354 g.SystemStack = thg.SystemStack 355 } 356 if g.Status != Gdead { 357 allg = append(allg, g) 358 } 359 dbp.gcache.addGoroutine(g) 360 } 361 if start == 0 { 362 dbp.gcache.allGCache = allg 363 } 364 365 return allg, -1, nil 366 } 367 368 // FindGoroutine returns a G struct representing the goroutine 369 // specified by `gid`. 
370 func FindGoroutine(dbp *Target, gid int) (*G, error) { 371 if selg := dbp.SelectedGoroutine(); (gid == -1) || (selg != nil && selg.ID == gid) || (selg == nil && gid == 0) { 372 // Return the currently selected goroutine in the following circumstances: 373 // 374 // 1. if the caller asks for gid == -1 (because that's what a goroutine ID of -1 means in our API). 375 // 2. if gid == selg.ID. 376 // this serves two purposes: (a) it's an optimizations that allows us 377 // to avoid reading any other goroutine and, more importantly, (b) we 378 // could be reading an incorrect value for the goroutine ID of a thread. 379 // This condition usually happens when a goroutine calls runtime.clone 380 // and for a short period of time two threads will appear to be running 381 // the same goroutine. 382 // 3. if the caller asks for gid == 0 and the selected goroutine is 383 // either 0 or nil. 384 // Goroutine 0 is special, it either means we have no current goroutine 385 // (for example, running C code), or that we are running on a special 386 // stack (system stack, signal handling stack) and we didn't properly 387 // detect it. 388 // Since there could be multiple goroutines '0' running simultaneously 389 // if the user requests it return the one that's already selected or 390 // nil if there isn't a selected goroutine. 391 return selg, nil 392 } 393 394 if gid == 0 { 395 return nil, fmt.Errorf("unknown goroutine %d", gid) 396 } 397 398 if g := dbp.gcache.partialGCache[gid]; g != nil { 399 return g, nil 400 } 401 402 // Calling GoroutinesInfo could be slow if there are many goroutines 403 // running, check if a running goroutine has been requested first. 404 for _, thread := range dbp.ThreadList() { 405 g, _ := GetG(thread) 406 if g != nil && g.ID == gid { 407 return g, nil 408 } 409 } 410 411 const goroutinesInfoLimit = 10 412 nextg := 0 413 for nextg >= 0 { 414 var gs []*G 415 var err error 416 gs, nextg, err = GoroutinesInfo(dbp, nextg, goroutinesInfoLimit) 417 if err != nil { 418 return nil, err 419 } 420 for i := range gs { 421 if gs[i].ID == gid { 422 if gs[i].Unreadable != nil { 423 return nil, gs[i].Unreadable 424 } 425 return gs[i], nil 426 } 427 } 428 } 429 430 return nil, fmt.Errorf("unknown goroutine %d", gid) 431 } 432 433 func getGVariable(thread Thread) (*Variable, error) { 434 regs, err := thread.Registers() 435 if err != nil { 436 return nil, err 437 } 438 439 gaddr, hasgaddr := regs.GAddr() 440 if !hasgaddr { 441 var err error 442 gaddr, err = readUintRaw(thread.ProcessMemory(), regs.TLS()+thread.BinInfo().GStructOffset(), int64(thread.BinInfo().Arch.PtrSize())) 443 if err != nil { 444 return nil, err 445 } 446 } 447 448 return newGVariable(thread, gaddr, thread.BinInfo().Arch.DerefTLS()) 449 } 450 451 func newGVariable(thread Thread, gaddr uint64, deref bool) (*Variable, error) { 452 typ, err := thread.BinInfo().findType("runtime.g") 453 if err != nil { 454 return nil, err 455 } 456 457 if deref { 458 typ = &godwarf.PtrType{ 459 CommonType: godwarf.CommonType{ 460 ByteSize: int64(thread.BinInfo().Arch.PtrSize()), 461 Name: "", 462 ReflectKind: reflect.Ptr, 463 Offset: 0, 464 }, 465 Type: typ, 466 } 467 } 468 469 return newVariableFromThread(thread, "", gaddr, typ), nil 470 } 471 472 // Defer returns the top-most defer of the goroutine. 
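// It returns nil if the goroutine's runtime.g struct is unreadable or there is no pending deferred call.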
473 func (g *G) Defer() *Defer { 474 if g.variable.Unreadable != nil { 475 return nil 476 } 477 dvar, _ := g.variable.structMember("_defer") 478 if dvar == nil { 479 return nil 480 } 481 dvar = dvar.maybeDereference() 482 if dvar.Addr == 0 { 483 return nil 484 } 485 d := &Defer{variable: dvar} 486 d.load() 487 return d 488 } 489 490 // UserCurrent returns the location the users code is at, 491 // or was at before entering a runtime function. 492 func (g *G) UserCurrent() Location { 493 it, err := g.stackIterator(0) 494 if err != nil { 495 return g.CurrentLoc 496 } 497 for count := 0; it.Next() && count < maxGoroutineUserCurrentDepth; count++ { 498 frame := it.Frame() 499 if frame.Call.Fn != nil { 500 name := frame.Call.Fn.Name 501 if strings.Contains(name, ".") && (!strings.HasPrefix(name, "runtime.") || frame.Call.Fn.exportedRuntime()) && !strings.HasPrefix(name, "internal/") && !strings.HasPrefix(name, "runtime/internal") { 502 return frame.Call 503 } 504 } 505 } 506 return g.CurrentLoc 507 } 508 509 // Go returns the location of the 'go' statement 510 // that spawned this goroutine. 511 func (g *G) Go() Location { 512 pc := g.GoPC 513 if fn := g.variable.bi.PCToFunc(pc); fn != nil { 514 // Backup to CALL instruction. 515 // Mimics runtime/traceback.go:677. 516 if g.GoPC > fn.Entry { 517 pc-- 518 } 519 } 520 f, l, fn := g.variable.bi.PCToLine(pc) 521 return Location{PC: g.GoPC, File: f, Line: l, Fn: fn} 522 } 523 524 // StartLoc returns the starting location of the goroutine. 525 func (g *G) StartLoc(tgt *Target) Location { 526 fn := g.variable.bi.PCToFunc(g.StartPC) 527 fn = tgt.dwrapUnwrap(fn) 528 if fn == nil { 529 return Location{PC: g.StartPC} 530 } 531 f, l := fn.cu.lineInfo.PCToLine(fn.Entry, fn.Entry) 532 return Location{PC: fn.Entry, File: f, Line: l, Fn: fn} 533 } 534 535 // System returns true if g is a system goroutine. See isSystemGoroutine in 536 // $GOROOT/src/runtime/traceback.go. 537 func (g *G) System(tgt *Target) bool { 538 loc := g.StartLoc(tgt) 539 if loc.Fn == nil { 540 return false 541 } 542 switch loc.Fn.Name { 543 case "runtime.main", "runtime.handleAsyncEvent": 544 return false 545 } 546 return strings.HasPrefix(loc.Fn.Name, "runtime.") 547 } 548 549 func (g *G) Labels() map[string]string { 550 if g.labels != nil { 551 return *g.labels 552 } 553 var labels map[string]string 554 if labelsVar := g.variable.loadFieldNamed("labels"); labelsVar != nil && len(labelsVar.Children) == 1 { 555 if address := labelsVar.Children[0]; address.Addr != 0 { 556 labelMapType, _ := g.variable.bi.findType("runtime/pprof.labelMap") 557 if labelMapType != nil { 558 labelMap := newVariable("", address.Addr, labelMapType, g.variable.bi, g.variable.mem) 559 labelMap.loadValue(loadFullValue) 560 labels = map[string]string{} 561 for i := range labelMap.Children { 562 if i%2 == 0 { 563 k := labelMap.Children[i] 564 v := labelMap.Children[i+1] 565 labels[constant.StringVal(k.Value)] = constant.StringVal(v.Value) 566 } 567 } 568 } 569 } 570 } 571 g.labels = &labels 572 return *g.labels 573 } 574 575 type Ancestor struct { 576 ID int64 // Goroutine ID 577 Unreadable error 578 pcsVar *Variable 579 } 580 581 // IsNilErr is returned when a variable is nil. 
582 type IsNilErr struct { 583 name string 584 } 585 586 func (err *IsNilErr) Error() string { 587 return fmt.Sprintf("%s is nil", err.name) 588 } 589 590 func globalScope(tgt *Target, bi *BinaryInfo, image *Image, mem MemoryReadWriter) *EvalScope { 591 return &EvalScope{Location: Location{}, Regs: op.DwarfRegisters{StaticBase: image.StaticBase}, Mem: mem, g: nil, BinInfo: bi, target: tgt, frameOffset: 0} 592 } 593 594 func newVariableFromThread(t Thread, name string, addr uint64, dwarfType godwarf.Type) *Variable { 595 return newVariable(name, addr, dwarfType, t.BinInfo(), t.ProcessMemory()) 596 } 597 598 func (v *Variable) newVariable(name string, addr uint64, dwarfType godwarf.Type, mem MemoryReadWriter) *Variable { 599 return newVariable(name, addr, dwarfType, v.bi, mem) 600 } 601 602 func newVariable(name string, addr uint64, dwarfType godwarf.Type, bi *BinaryInfo, mem MemoryReadWriter) *Variable { 603 if styp, isstruct := dwarfType.(*godwarf.StructType); isstruct && !strings.Contains(styp.Name, "<") && !strings.Contains(styp.Name, "{") { 604 // For named structs the compiler will emit a DW_TAG_structure_type entry 605 // and a DW_TAG_typedef entry. 606 // 607 // Normally variables refer to the typedef entry but sometimes global 608 // variables will refer to the struct entry incorrectly. 609 // Also the runtime type offset resolution (runtimeTypeToDIE) will return 610 // the struct entry directly. 611 // 612 // In both cases we prefer to have a typedef type for consistency's sake. 613 // 614 // So we wrap all struct types into a fake typedef type except for: 615 // a. types not defined by go 616 // b. anonymous struct types (they contain the '{' character) 617 // c. Go internal struct types used to describe maps (they contain the '<' 618 // character). 
619 cu := bi.Images[dwarfType.Common().Index].findCompileUnitForOffset(dwarfType.Common().Offset) 620 if cu != nil && cu.isgo { 621 dwarfType = &godwarf.TypedefType{ 622 CommonType: *(dwarfType.Common()), 623 Type: dwarfType, 624 } 625 } 626 } 627 628 v := &Variable{ 629 Name: name, 630 Addr: addr, 631 DwarfType: dwarfType, 632 mem: mem, 633 bi: bi, 634 } 635 636 v.RealType = resolveTypedef(v.DwarfType) 637 638 switch t := v.RealType.(type) { 639 case *godwarf.PtrType: 640 v.Kind = reflect.Ptr 641 if _, isvoid := t.Type.(*godwarf.VoidType); isvoid { 642 v.Kind = reflect.UnsafePointer 643 } else if isCgoType(bi, t) { 644 v.Flags |= VariableCPtr 645 v.fieldType = t.Type 646 v.stride = alignAddr(v.fieldType.Size(), v.fieldType.Align()) 647 v.Len = 0 648 if isCgoCharPtr(bi, t) { 649 v.Kind = reflect.String 650 } 651 if v.Addr != 0 { 652 v.Base, v.Unreadable = readUintRaw(v.mem, v.Addr, int64(v.bi.Arch.PtrSize())) 653 } 654 } 655 case *godwarf.ChanType: 656 v.Kind = reflect.Chan 657 if v.Addr != 0 { 658 v.loadChanInfo() 659 } 660 case *godwarf.MapType: 661 v.Kind = reflect.Map 662 case *godwarf.StringType: 663 v.Kind = reflect.String 664 v.stride = 1 665 v.fieldType = &godwarf.UintType{BasicType: godwarf.BasicType{CommonType: godwarf.CommonType{ByteSize: 1, Name: "byte"}, BitSize: 8, BitOffset: 0}} 666 if v.Addr != 0 { 667 v.Base, v.Len, v.Unreadable = readStringInfo(v.mem, v.bi.Arch, v.Addr) 668 } 669 case *godwarf.SliceType: 670 v.Kind = reflect.Slice 671 if v.Addr != 0 { 672 v.loadSliceInfo(t) 673 } 674 case *godwarf.InterfaceType: 675 v.Kind = reflect.Interface 676 case *godwarf.StructType: 677 v.Kind = reflect.Struct 678 case *godwarf.ArrayType: 679 v.Kind = reflect.Array 680 v.Base = v.Addr 681 v.Len = t.Count 682 v.Cap = -1 683 v.fieldType = t.Type 684 v.stride = 0 685 686 if t.Count > 0 { 687 v.stride = t.ByteSize / t.Count 688 } 689 case *godwarf.ComplexType: 690 switch t.ByteSize { 691 case 8: 692 v.Kind = reflect.Complex64 693 case 16: 694 v.Kind = reflect.Complex128 695 } 696 case *godwarf.IntType: 697 v.Kind = reflect.Int 698 case *godwarf.CharType: 699 // Rest of the code assumes that Kind == reflect.Int implies RealType == 700 // godwarf.IntType. 
701 v.RealType = &godwarf.IntType{BasicType: t.BasicType} 702 v.Kind = reflect.Int 703 case *godwarf.UcharType: 704 v.RealType = &godwarf.IntType{BasicType: t.BasicType} 705 v.Kind = reflect.Int 706 case *godwarf.UintType: 707 v.Kind = reflect.Uint 708 case *godwarf.FloatType: 709 switch t.ByteSize { 710 case 4: 711 v.Kind = reflect.Float32 712 case 8: 713 v.Kind = reflect.Float64 714 } 715 case *godwarf.BoolType: 716 v.Kind = reflect.Bool 717 case *godwarf.FuncType: 718 v.Kind = reflect.Func 719 case *godwarf.VoidType: 720 v.Kind = reflect.Invalid 721 case *godwarf.UnspecifiedType: 722 v.Kind = reflect.Invalid 723 default: 724 v.Unreadable = fmt.Errorf("unknown type: %T", t) 725 } 726 727 return v 728 } 729 730 func resolveTypedef(typ godwarf.Type) godwarf.Type { 731 for { 732 switch tt := typ.(type) { 733 case *godwarf.TypedefType: 734 typ = tt.Type 735 case *godwarf.QualType: 736 typ = tt.Type 737 default: 738 return typ 739 } 740 } 741 } 742 743 var constantMaxInt64 = constant.MakeInt64(1<<63 - 1) 744 745 func newConstant(val constant.Value, mem MemoryReadWriter) *Variable { 746 v := &Variable{Value: val, mem: mem, loaded: true} 747 switch val.Kind() { 748 case constant.Int: 749 v.Kind = reflect.Int 750 if constant.Sign(val) >= 0 && constant.Compare(val, token.GTR, constantMaxInt64) { 751 v.Kind = reflect.Uint64 752 } 753 case constant.Float: 754 v.Kind = reflect.Float64 755 case constant.Bool: 756 v.Kind = reflect.Bool 757 case constant.Complex: 758 v.Kind = reflect.Complex128 759 case constant.String: 760 v.Kind = reflect.String 761 v.Len = int64(len(constant.StringVal(val))) 762 } 763 v.Flags |= VariableConstant 764 return v 765 } 766 767 var nilVariable = &Variable{ 768 Name: "nil", 769 Addr: 0, 770 Base: 0, 771 Kind: reflect.Ptr, 772 Children: []Variable{{Addr: 0, OnlyAddr: true}}, 773 } 774 775 func (v *Variable) clone() *Variable { 776 r := *v 777 return &r 778 } 779 780 // TypeString returns the string representation 781 // of the type of this variable. 782 func (v *Variable) TypeString() string { 783 if v == nilVariable { 784 return "nil" 785 } 786 if v.DwarfType == nil { 787 return v.Kind.String() 788 } 789 if v.DwarfType.Common().Name != "" { 790 return v.DwarfType.Common().Name 791 } 792 r := v.DwarfType.String() 793 if r == "*void" { 794 cu := v.bi.Images[v.DwarfType.Common().Index].findCompileUnitForOffset(v.DwarfType.Common().Offset) 795 if cu != nil && cu.isgo { 796 r = "unsafe.Pointer" 797 } 798 } 799 return r 800 } 801 802 func (v *Variable) toField(field *godwarf.StructField) (*Variable, error) { 803 if v.Unreadable != nil { 804 return v.clone(), nil 805 } 806 if v.Addr == 0 { 807 return nil, &IsNilErr{v.Name} 808 } 809 810 name := "" 811 if v.Name != "" { 812 parts := strings.Split(field.Name, ".") 813 if len(parts) > 1 { 814 name = fmt.Sprintf("%s.%s", v.Name, parts[1]) 815 } else { 816 name = fmt.Sprintf("%s.%s", v.Name, field.Name) 817 } 818 } 819 return v.newVariable(name, uint64(int64(v.Addr)+field.ByteOffset), field.Type, v.mem), nil 820 } 821 822 // ErrNoGoroutine returned when a G could not be found 823 // for a specific thread. 
824 type ErrNoGoroutine struct { 825 tid int 826 } 827 828 func (ng ErrNoGoroutine) Error() string { 829 return fmt.Sprintf("no G executing on thread %d", ng.tid) 830 } 831 832 var ErrUnreadableG = errors.New("could not read G struct") 833 834 func (v *Variable) parseG() (*G, error) { 835 mem := v.mem 836 gaddr := uint64(v.Addr) 837 _, deref := v.RealType.(*godwarf.PtrType) 838 839 if deref { 840 var err error 841 gaddr, err = readUintRaw(mem, gaddr, int64(v.bi.Arch.PtrSize())) 842 if err != nil { 843 return nil, fmt.Errorf("error derefing *G %s", err) 844 } 845 } 846 if gaddr == 0 { 847 id := 0 848 if thread, ok := mem.(Thread); ok { 849 id = thread.ThreadID() 850 } 851 return nil, ErrNoGoroutine{tid: id} 852 } 853 isptr := func(t godwarf.Type) bool { 854 _, ok := t.(*godwarf.PtrType) 855 return ok 856 } 857 for isptr(v.RealType) { 858 v = v.maybeDereference() // +rtype g 859 } 860 861 v.mem = cacheMemory(v.mem, v.Addr, int(v.RealType.Size())) 862 863 schedVar := v.loadFieldNamed("sched") // +rtype gobuf 864 if schedVar == nil { 865 return nil, ErrUnreadableG 866 } 867 pc, _ := constant.Int64Val(schedVar.fieldVariable("pc").Value) // +rtype uintptr 868 sp, _ := constant.Int64Val(schedVar.fieldVariable("sp").Value) // +rtype uintptr 869 var bp, lr int64 870 if bpvar := schedVar.fieldVariable("bp"); /* +rtype -opt uintptr */ bpvar != nil && bpvar.Value != nil { 871 bp, _ = constant.Int64Val(bpvar.Value) 872 } 873 if bpvar := schedVar.fieldVariable("lr"); /* +rtype -opt uintptr */ bpvar != nil && bpvar.Value != nil { 874 lr, _ = constant.Int64Val(bpvar.Value) 875 } 876 877 unreadable := false 878 879 loadInt64Maybe := func(name string) int64 { 880 vv := v.loadFieldNamed(name) 881 if vv == nil { 882 unreadable = true 883 return 0 884 } 885 n, _ := constant.Int64Val(vv.Value) 886 return n 887 } 888 889 id := loadInt64Maybe("goid") // +rtype int64 890 gopc := loadInt64Maybe("gopc") // +rtype uintptr 891 startpc := loadInt64Maybe("startpc") // +rtype uintptr 892 waitSince := loadInt64Maybe("waitsince") // +rtype int64 893 waitReason := int64(0) 894 if producer := v.bi.Producer(); producer != "" && goversion.ProducerAfterOrEqual(producer, 1, 11) { 895 waitReason = loadInt64Maybe("waitreason") // +rtype -opt waitReason 896 } 897 var stackhi, stacklo uint64 898 if stackVar := v.loadFieldNamed("stack"); /* +rtype stack */ stackVar != nil { 899 if stackhiVar := stackVar.fieldVariable("hi"); /* +rtype uintptr */ stackhiVar != nil { 900 stackhi, _ = constant.Uint64Val(stackhiVar.Value) 901 } 902 if stackloVar := stackVar.fieldVariable("lo"); /* +rtype uintptr */ stackloVar != nil { 903 stacklo, _ = constant.Uint64Val(stackloVar.Value) 904 } 905 } 906 907 status := loadInt64Maybe("atomicstatus") // +rtype uint32 908 909 if unreadable { 910 return nil, ErrUnreadableG 911 } 912 913 f, l, fn := v.bi.PCToLine(uint64(pc)) 914 915 v.Name = "runtime.curg" 916 917 g := &G{ 918 ID: int(id), 919 GoPC: uint64(gopc), 920 StartPC: uint64(startpc), 921 PC: uint64(pc), 922 SP: uint64(sp), 923 BP: uint64(bp), 924 LR: uint64(lr), 925 Status: uint64(status), 926 WaitSince: waitSince, 927 WaitReason: waitReason, 928 CurrentLoc: Location{PC: uint64(pc), File: f, Line: l, Fn: fn}, 929 variable: v, 930 stack: stack{hi: stackhi, lo: stacklo}, 931 } 932 return g, nil 933 } 934 935 func (v *Variable) loadFieldNamed(name string) *Variable { 936 v, err := v.structMember(name) 937 if err != nil { 938 return nil 939 } 940 v.loadValue(loadFullValue) 941 if v.Unreadable != nil { 942 return nil 943 } 944 return v 945 } 946 947 func (v 
*Variable) fieldVariable(name string) *Variable {
	if !v.loaded {
		panic("fieldVariable called on a variable that wasn't loaded")
	}
	for i := range v.Children {
		if child := &v.Children[i]; child.Name == name {
			return child
		}
	}
	return nil
}

var errTracebackAncestorsDisabled = errors.New("tracebackancestors is disabled")

// Ancestors returns the list of ancestors for g.
func Ancestors(p *Target, g *G, n int) ([]Ancestor, error) {
	scope := globalScope(p, p.BinInfo(), p.BinInfo().Images[0], p.Memory())
	tbav, err := scope.EvalExpression("runtime.debug.tracebackancestors", loadSingleValue)
	if err == nil && tbav.Unreadable == nil && tbav.Kind == reflect.Int {
		tba, _ := constant.Int64Val(tbav.Value)
		if tba == 0 {
			return nil, errTracebackAncestorsDisabled
		}
	}

	av, err := g.variable.structMember("ancestors")
	if err != nil {
		return nil, err
	}
	av = av.maybeDereference()
	av.loadValue(LoadConfig{MaxArrayValues: n, MaxVariableRecurse: 1, MaxStructFields: -1})
	if av.Unreadable != nil {
		return nil, av.Unreadable
	}
	if av.Addr == 0 {
		// no ancestors
		return nil, nil
	}

	r := make([]Ancestor, len(av.Children))

	for i := range av.Children {
		if av.Children[i].Unreadable != nil {
			r[i].Unreadable = av.Children[i].Unreadable
			continue
		}
		goidv := av.Children[i].fieldVariable("goid")
		if goidv.Unreadable != nil {
			r[i].Unreadable = goidv.Unreadable
			continue
		}
		r[i].ID, _ = constant.Int64Val(goidv.Value)
		pcsVar := av.Children[i].fieldVariable("pcs")
		if pcsVar.Unreadable != nil {
			r[i].Unreadable = pcsVar.Unreadable
		}
		pcsVar.loaded = false
		pcsVar.Children = pcsVar.Children[:0]
		r[i].pcsVar = pcsVar
	}

	return r, nil
}

// Stack returns the stack trace of ancestor 'a' as saved by the runtime.
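// At most n frames are returned; the last frame's Bottom field reports whether the entire saved trace was loaded.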
1012 func (a *Ancestor) Stack(n int) ([]Stackframe, error) { 1013 if a.Unreadable != nil { 1014 return nil, a.Unreadable 1015 } 1016 pcsVar := a.pcsVar.clone() 1017 pcsVar.loadValue(LoadConfig{MaxArrayValues: n}) 1018 if pcsVar.Unreadable != nil { 1019 return nil, pcsVar.Unreadable 1020 } 1021 r := make([]Stackframe, len(pcsVar.Children)) 1022 for i := range pcsVar.Children { 1023 if pcsVar.Children[i].Unreadable != nil { 1024 r[i] = Stackframe{Err: pcsVar.Children[i].Unreadable} 1025 continue 1026 } 1027 if pcsVar.Children[i].Kind != reflect.Uint { 1028 return nil, fmt.Errorf("wrong type for pcs item %d: %v", i, pcsVar.Children[i].Kind) 1029 } 1030 pc, _ := constant.Int64Val(pcsVar.Children[i].Value) 1031 fn := a.pcsVar.bi.PCToFunc(uint64(pc)) 1032 if fn == nil { 1033 loc := Location{PC: uint64(pc)} 1034 r[i] = Stackframe{Current: loc, Call: loc} 1035 continue 1036 } 1037 pc2 := uint64(pc) 1038 if pc2-1 >= fn.Entry { 1039 pc2-- 1040 } 1041 f, ln := fn.cu.lineInfo.PCToLine(fn.Entry, pc2) 1042 loc := Location{PC: uint64(pc), File: f, Line: ln, Fn: fn} 1043 r[i] = Stackframe{Current: loc, Call: loc} 1044 } 1045 r[len(r)-1].Bottom = pcsVar.Len == int64(len(pcsVar.Children)) 1046 return r, nil 1047 } 1048 1049 func (v *Variable) structMember(memberName string) (*Variable, error) { 1050 if v.Unreadable != nil { 1051 return v.clone(), nil 1052 } 1053 vname := v.Name 1054 if v.loaded && (v.Flags&VariableFakeAddress) != 0 { 1055 for i := range v.Children { 1056 if v.Children[i].Name == memberName { 1057 return &v.Children[i], nil 1058 } 1059 } 1060 return nil, fmt.Errorf("%s has no member %s", vname, memberName) 1061 } 1062 switch v.Kind { 1063 case reflect.Chan: 1064 v = v.clone() 1065 v.RealType = resolveTypedef(&(v.RealType.(*godwarf.ChanType).TypedefType)) 1066 case reflect.Interface: 1067 v.loadInterface(0, false, LoadConfig{}) 1068 if len(v.Children) > 0 { 1069 v = &v.Children[0] 1070 } 1071 } 1072 1073 queue := []*Variable{v} 1074 seen := map[string]struct{}{} // prevent infinite loops 1075 first := true 1076 1077 for len(queue) > 0 { 1078 v := queue[0] 1079 queue = append(queue[:0], queue[1:]...) 
1080 if _, isseen := seen[v.RealType.String()]; isseen { 1081 continue 1082 } 1083 seen[v.RealType.String()] = struct{}{} 1084 1085 structVar := v.maybeDereference() 1086 structVar.Name = v.Name 1087 if structVar.Unreadable != nil { 1088 return structVar, nil 1089 } 1090 1091 switch t := structVar.RealType.(type) { 1092 case *godwarf.StructType: 1093 for _, field := range t.Field { 1094 if field.Name == memberName { 1095 return structVar.toField(field) 1096 } 1097 isEmbeddedStructMember := 1098 field.Embedded || 1099 (field.Type.Common().Name == field.Name) || 1100 (len(field.Name) > 1 && 1101 field.Name[0] == '*' && 1102 field.Type.Common().Name[1:] == field.Name[1:]) 1103 if !isEmbeddedStructMember { 1104 continue 1105 } 1106 embeddedVar, err := structVar.toField(field) 1107 if err != nil { 1108 return nil, err 1109 } 1110 // Check for embedded field referenced by type name 1111 parts := strings.Split(field.Name, ".") 1112 if len(parts) > 1 && parts[1] == memberName { 1113 return embeddedVar, nil 1114 } 1115 embeddedVar.Name = structVar.Name 1116 queue = append(queue, embeddedVar) 1117 } 1118 default: 1119 if first { 1120 return nil, fmt.Errorf("%s (type %s) is not a struct", vname, structVar.TypeString()) 1121 } 1122 } 1123 first = false 1124 } 1125 1126 return nil, fmt.Errorf("%s has no member %s", vname, memberName) 1127 } 1128 1129 func readVarEntry(entry *godwarf.Tree, image *Image) (name string, typ godwarf.Type, err error) { 1130 name, ok := entry.Val(dwarf.AttrName).(string) 1131 if !ok { 1132 return "", nil, fmt.Errorf("malformed variable DIE (name)") 1133 } 1134 1135 typ, err = entry.Type(image.dwarf, image.index, image.typeCache) 1136 if err != nil { 1137 return "", nil, err 1138 } 1139 1140 return name, typ, nil 1141 } 1142 1143 // Extracts the name and type of a variable from a dwarf entry 1144 // then executes the instructions given in the DW_AT_location attribute to grab the variable's address 1145 func extractVarInfoFromEntry(tgt *Target, bi *BinaryInfo, image *Image, regs op.DwarfRegisters, mem MemoryReadWriter, entry *godwarf.Tree, dictAddr uint64) (*Variable, error) { 1146 if entry.Tag != dwarf.TagFormalParameter && entry.Tag != dwarf.TagVariable { 1147 return nil, fmt.Errorf("invalid entry tag, only supports FormalParameter and Variable, got %s", entry.Tag.String()) 1148 } 1149 1150 n, t, err := readVarEntry(entry, image) 1151 if err != nil { 1152 return nil, err 1153 } 1154 1155 t, err = resolveParametricType(tgt, bi, mem, t, dictAddr) 1156 if err != nil { 1157 // Log the error, keep going with t, which will be the shape type 1158 logflags.DebuggerLogger().Errorf("could not resolve parametric type of %s", n) 1159 } 1160 1161 addr, pieces, descr, err := bi.Location(entry, dwarf.AttrLocation, regs.PC(), regs, mem) 1162 if pieces != nil { 1163 var cmem *compositeMemory 1164 if tgt != nil { 1165 addr, cmem, err = tgt.newCompositeMemory(mem, regs, pieces, descr) 1166 } else { 1167 cmem, err = newCompositeMemory(mem, bi.Arch, regs, pieces) 1168 if cmem != nil { 1169 cmem.base = fakeAddressUnresolv 1170 addr = int64(cmem.base) 1171 } 1172 } 1173 if cmem != nil { 1174 mem = cmem 1175 } 1176 } 1177 1178 v := newVariable(n, uint64(addr), t, bi, mem) 1179 if pieces != nil { 1180 v.Flags |= VariableFakeAddress 1181 } 1182 v.LocationExpr = descr 1183 v.DeclLine, _ = entry.Val(dwarf.AttrDeclLine).(int64) 1184 if err != nil { 1185 v.Unreadable = err 1186 } 1187 return v, nil 1188 } 1189 1190 // If v is a pointer a new variable is returned containing the value pointed by v. 
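// Otherwise, or if the variable is unreadable, v is returned unchanged.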
1191 func (v *Variable) maybeDereference() *Variable { 1192 if v.Unreadable != nil { 1193 return v 1194 } 1195 1196 switch t := v.RealType.(type) { 1197 case *godwarf.PtrType: 1198 if v.Addr == 0 && len(v.Children) == 1 && v.loaded { 1199 // fake pointer variable constructed by casting an integer to a pointer type 1200 return &v.Children[0] 1201 } 1202 ptrval, err := readUintRaw(v.mem, v.Addr, t.ByteSize) 1203 r := v.newVariable("", ptrval, t.Type, DereferenceMemory(v.mem)) 1204 if err != nil { 1205 r.Unreadable = err 1206 } 1207 1208 return r 1209 default: 1210 return v 1211 } 1212 } 1213 1214 func loadValues(vars []*Variable, cfg LoadConfig) { 1215 for i := range vars { 1216 vars[i].loadValueInternal(0, cfg) 1217 } 1218 } 1219 1220 // Extracts the value of the variable at the given address. 1221 func (v *Variable) loadValue(cfg LoadConfig) { 1222 v.loadValueInternal(0, cfg) 1223 } 1224 1225 func (v *Variable) loadValueInternal(recurseLevel int, cfg LoadConfig) { 1226 if v.Unreadable != nil || v.loaded || (v.Addr == 0 && v.Base == 0) { 1227 return 1228 } 1229 1230 v.loaded = true 1231 switch v.Kind { 1232 case reflect.Ptr, reflect.UnsafePointer: 1233 v.Len = 1 1234 v.Children = []Variable{*v.maybeDereference()} 1235 if cfg.FollowPointers { 1236 // Don't increase the recursion level when dereferencing pointers 1237 // unless this is a pointer to interface (which could cause an infinite loop) 1238 nextLvl := recurseLevel 1239 if v.Children[0].Kind == reflect.Interface { 1240 nextLvl++ 1241 } 1242 v.Children[0].loadValueInternal(nextLvl, cfg) 1243 } else { 1244 v.Children[0].OnlyAddr = true 1245 } 1246 1247 case reflect.Chan: 1248 sv := v.clone() 1249 sv.RealType = resolveTypedef(&(sv.RealType.(*godwarf.ChanType).TypedefType)) 1250 sv = sv.maybeDereference() 1251 sv.loadValueInternal(0, loadFullValue) 1252 v.Children = sv.Children 1253 v.Len = sv.Len 1254 v.Base = sv.Addr 1255 1256 case reflect.Map: 1257 if recurseLevel <= cfg.MaxVariableRecurse { 1258 v.loadMap(recurseLevel, cfg) 1259 } else { 1260 // loads length so that the client knows that the map isn't empty 1261 v.mapIterator() 1262 } 1263 1264 case reflect.String: 1265 var val string 1266 switch { 1267 case v.Flags&VariableCPtr != 0: 1268 var done bool 1269 val, done, v.Unreadable = readCStringValue(DereferenceMemory(v.mem), v.Base, cfg) 1270 if v.Unreadable == nil { 1271 v.Len = int64(len(val)) 1272 if !done { 1273 v.Len++ 1274 } 1275 } 1276 1277 case v.Flags&VariableCPURegister != 0: 1278 val = fmt.Sprintf("%x", v.reg.Bytes) 1279 s := v.Base - fakeAddressUnresolv 1280 if s < uint64(len(val)) { 1281 val = val[s:] 1282 if v.Len >= 0 && v.Len < int64(len(val)) { 1283 val = val[:v.Len] 1284 } 1285 } 1286 1287 default: 1288 val, v.Unreadable = readStringValue(DereferenceMemory(v.mem), v.Base, v.Len, cfg) 1289 } 1290 v.Value = constant.MakeString(val) 1291 1292 case reflect.Slice, reflect.Array: 1293 v.loadArrayValues(recurseLevel, cfg) 1294 1295 case reflect.Struct: 1296 v.mem = cacheMemory(v.mem, v.Addr, int(v.RealType.Size())) 1297 t := v.RealType.(*godwarf.StructType) 1298 v.Len = int64(len(t.Field)) 1299 // Recursively call extractValue to grab 1300 // the value of all the members of the struct. 
		if recurseLevel <= cfg.MaxVariableRecurse {
			v.Children = make([]Variable, 0, len(t.Field))
			for i, field := range t.Field {
				if cfg.MaxStructFields >= 0 && len(v.Children) >= cfg.MaxStructFields {
					break
				}
				f, _ := v.toField(field)
				v.Children = append(v.Children, *f)
				v.Children[i].Name = field.Name
				v.Children[i].loadValueInternal(recurseLevel+1, cfg)
			}
		}
		if t.Name == "time.Time" {
			v.formatTime()
		}

	case reflect.Interface:
		v.loadInterface(recurseLevel, true, cfg)

	case reflect.Complex64, reflect.Complex128:
		v.readComplex(v.RealType.(*godwarf.ComplexType).ByteSize)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		var val int64
		val, v.Unreadable = readIntRaw(v.mem, v.Addr, v.RealType.(*godwarf.IntType).ByteSize)
		v.Value = constant.MakeInt64(val)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		if v.Flags&VariableCPURegister != 0 {
			v.Value = constant.MakeUint64(v.reg.Uint64Val)
		} else {
			var val uint64
			val, v.Unreadable = readUintRaw(v.mem, v.Addr, v.RealType.(*godwarf.UintType).ByteSize)
			v.Value = constant.MakeUint64(val)
		}
	case reflect.Bool:
		val := make([]byte, 1)
		_, err := v.mem.ReadMemory(val, v.Addr)
		v.Unreadable = err
		if err == nil {
			v.Value = constant.MakeBool(val[0] != 0)
		}
	case reflect.Float32, reflect.Float64:
		var val float64
		val, v.Unreadable = v.readFloatRaw(v.RealType.(*godwarf.FloatType).ByteSize)
		v.Value = constant.MakeFloat64(val)
		switch {
		case math.IsInf(val, +1):
			v.FloatSpecial = FloatIsPosInf
		case math.IsInf(val, -1):
			v.FloatSpecial = FloatIsNegInf
		case math.IsNaN(val):
			v.FloatSpecial = FloatIsNaN
		}
	case reflect.Func:
		v.readFunctionPtr()
	default:
		v.Unreadable = fmt.Errorf("unknown or unsupported kind: \"%s\"", v.Kind.String())
	}
}

// convertToEface converts srcv into an "interface {}" and writes it to
// dstv.
// Dstv must be a variable of type "interface {}" and srcv must either be an
// interface or a pointer shaped variable (map, channel, pointer or struct
// containing a single pointer)
func convertToEface(srcv, dstv *Variable) error {
	if dstv.RealType.String() != "interface {}" {
		return &typeConvErr{srcv.DwarfType, dstv.RealType}
	}
	if _, isiface := srcv.RealType.(*godwarf.InterfaceType); isiface {
		// iface -> eface conversion
		_type, data, _ := srcv.readInterface()
		if srcv.Unreadable != nil {
			return srcv.Unreadable
		}
		_type = _type.maybeDereference()
		dstv.writeEmptyInterface(uint64(_type.Addr), data)
		return nil
	}
	typeAddr, typeKind, runtimeTypeFound, err := dwarfToRuntimeType(srcv.bi, srcv.mem, srcv.RealType)
	if err != nil {
		return err
	}
	if !runtimeTypeFound || typeKind&kindDirectIface == 0 {
		return &typeConvErr{srcv.DwarfType, dstv.RealType}
	}
	return dstv.writeEmptyInterface(typeAddr, srcv)
}

func readStringInfo(mem MemoryReadWriter, arch *Arch, addr uint64) (uint64, int64, error) {
	// string data structure is always two ptrs in size.
Addr, followed by len 1391 // http://research.swtch.com/godata 1392 1393 mem = cacheMemory(mem, addr, arch.PtrSize()*2) 1394 1395 // read len 1396 strlen, err := readIntRaw(mem, addr+uint64(arch.PtrSize()), int64(arch.PtrSize())) 1397 if err != nil { 1398 return 0, 0, fmt.Errorf("could not read string len %s", err) 1399 } 1400 if strlen < 0 { 1401 return 0, 0, fmt.Errorf("invalid length: %d", strlen) 1402 } 1403 1404 // read addr 1405 addr, err = readUintRaw(mem, addr, int64(arch.PtrSize())) 1406 if err != nil { 1407 return 0, 0, fmt.Errorf("could not read string pointer %s", err) 1408 } 1409 if addr == 0 { 1410 return 0, 0, nil 1411 } 1412 return addr, strlen, nil 1413 } 1414 1415 func readStringValue(mem MemoryReadWriter, addr uint64, strlen int64, cfg LoadConfig) (string, error) { 1416 if strlen == 0 { 1417 return "", nil 1418 } 1419 1420 count := strlen 1421 if count > int64(cfg.MaxStringLen) { 1422 count = int64(cfg.MaxStringLen) 1423 } 1424 1425 val := make([]byte, int(count)) 1426 _, err := mem.ReadMemory(val, addr) 1427 if err != nil { 1428 return "", fmt.Errorf("could not read string at %#v due to %s", addr, err) 1429 } 1430 1431 return string(val), nil 1432 } 1433 1434 func readCStringValue(mem MemoryReadWriter, addr uint64, cfg LoadConfig) (string, bool, error) { 1435 buf := make([]byte, cfg.MaxStringLen) // 1436 val := buf[:0] // part of the string we've already read 1437 1438 for len(buf) > 0 { 1439 // Reads some memory for the string but (a) never more than we would 1440 // need (considering cfg.MaxStringLen), and (b) never cross a page boundary 1441 // until we're sure we have to. 1442 // The page check is needed to avoid getting an I/O error for reading 1443 // memory we don't even need. 1444 // We don't know how big a page is but 1024 is a reasonable minimum common 1445 // divisor for all architectures. 
1446 curaddr := addr + uint64(len(val)) 1447 maxsize := int(alignAddr(int64(curaddr+1), 1024) - int64(curaddr)) 1448 size := len(buf) 1449 if size > maxsize { 1450 size = maxsize 1451 } 1452 1453 _, err := mem.ReadMemory(buf[:size], curaddr) 1454 if err != nil { 1455 return "", false, fmt.Errorf("could not read string at %#v due to %s", addr, err) 1456 } 1457 1458 done := false 1459 for i := 0; i < size; i++ { 1460 if buf[i] == 0 { 1461 done = true 1462 size = i 1463 break 1464 } 1465 } 1466 1467 val = val[:len(val)+size] 1468 buf = buf[size:] 1469 if done { 1470 return string(val), true, nil 1471 } 1472 } 1473 1474 return string(val), false, nil 1475 } 1476 1477 const ( 1478 sliceArrayFieldName = "array" 1479 sliceLenFieldName = "len" 1480 sliceCapFieldName = "cap" 1481 ) 1482 1483 func (v *Variable) loadSliceInfo(t *godwarf.SliceType) { 1484 v.mem = cacheMemory(v.mem, v.Addr, int(t.Size())) 1485 1486 var err error 1487 for _, f := range t.Field { 1488 switch f.Name { 1489 case sliceArrayFieldName: 1490 var base uint64 1491 base, err = readUintRaw(v.mem, uint64(int64(v.Addr)+f.ByteOffset), f.Type.Size()) 1492 if err == nil { 1493 v.Base = base 1494 // Dereference array type to get value type 1495 ptrType, ok := f.Type.(*godwarf.PtrType) 1496 if !ok { 1497 //lint:ignore ST1005 backwards compatibility 1498 v.Unreadable = fmt.Errorf("Invalid type %s in slice array", f.Type) 1499 return 1500 } 1501 v.fieldType = ptrType.Type 1502 } 1503 case sliceLenFieldName: 1504 lstrAddr, _ := v.toField(f) 1505 lstrAddr.loadValue(loadSingleValue) 1506 err = lstrAddr.Unreadable 1507 if err == nil { 1508 v.Len, _ = constant.Int64Val(lstrAddr.Value) 1509 } 1510 case sliceCapFieldName: 1511 cstrAddr, _ := v.toField(f) 1512 cstrAddr.loadValue(loadSingleValue) 1513 err = cstrAddr.Unreadable 1514 if err == nil { 1515 v.Cap, _ = constant.Int64Val(cstrAddr.Value) 1516 } 1517 } 1518 if err != nil { 1519 v.Unreadable = err 1520 return 1521 } 1522 } 1523 1524 v.stride = v.fieldType.Size() 1525 if t, ok := v.fieldType.(*godwarf.PtrType); ok { 1526 v.stride = t.ByteSize 1527 } 1528 } 1529 1530 // loadChanInfo loads the buffer size of the channel and changes the type of 1531 // the buf field from unsafe.Pointer to an array of the correct type. 
1532 func (v *Variable) loadChanInfo() { 1533 chanType, ok := v.RealType.(*godwarf.ChanType) 1534 if !ok { 1535 v.Unreadable = errors.New("bad channel type") 1536 return 1537 } 1538 sv := v.clone() 1539 sv.RealType = resolveTypedef(&(chanType.TypedefType)) 1540 sv = sv.maybeDereference() 1541 if sv.Unreadable != nil || sv.Addr == 0 { 1542 return 1543 } 1544 v.Base = sv.Addr 1545 structType, ok := sv.DwarfType.(*godwarf.StructType) 1546 if !ok { 1547 v.Unreadable = errors.New("bad channel type") 1548 return 1549 } 1550 1551 lenAddr, _ := sv.toField(structType.Field[1]) 1552 lenAddr.loadValue(loadSingleValue) 1553 if lenAddr.Unreadable != nil { 1554 v.Unreadable = fmt.Errorf("unreadable length: %v", lenAddr.Unreadable) 1555 return 1556 } 1557 chanLen, _ := constant.Uint64Val(lenAddr.Value) 1558 1559 newStructType := &godwarf.StructType{} 1560 *newStructType = *structType 1561 newStructType.Field = make([]*godwarf.StructField, len(structType.Field)) 1562 1563 for i := range structType.Field { 1564 field := &godwarf.StructField{} 1565 *field = *structType.Field[i] 1566 if field.Name == "buf" { 1567 field.Type = pointerTo(fakeArrayType(chanLen, chanType.ElemType), v.bi.Arch) 1568 } 1569 newStructType.Field[i] = field 1570 } 1571 1572 v.RealType = &godwarf.ChanType{ 1573 TypedefType: godwarf.TypedefType{ 1574 CommonType: chanType.TypedefType.CommonType, 1575 Type: pointerTo(newStructType, v.bi.Arch), 1576 }, 1577 ElemType: chanType.ElemType, 1578 } 1579 } 1580 1581 func (v *Variable) loadArrayValues(recurseLevel int, cfg LoadConfig) { 1582 if v.Unreadable != nil { 1583 return 1584 } 1585 if v.Len < 0 { 1586 //lint:ignore ST1005 backwards compatibility 1587 v.Unreadable = errors.New("Negative array length") 1588 return 1589 } 1590 1591 count := v.Len 1592 // Cap number of elements 1593 if count > int64(cfg.MaxArrayValues) { 1594 count = int64(cfg.MaxArrayValues) 1595 } 1596 1597 if v.stride < maxArrayStridePrefetch { 1598 v.mem = cacheMemory(v.mem, v.Base, int(v.stride*count)) 1599 } 1600 1601 errcount := 0 1602 1603 mem := v.mem 1604 if v.Kind != reflect.Array { 1605 mem = DereferenceMemory(mem) 1606 } 1607 1608 for i := int64(0); i < count; i++ { 1609 fieldvar := v.newVariable("", uint64(int64(v.Base)+(i*v.stride)), v.fieldType, mem) 1610 fieldvar.loadValueInternal(recurseLevel+1, cfg) 1611 1612 if fieldvar.Unreadable != nil { 1613 errcount++ 1614 } 1615 1616 v.Children = append(v.Children, *fieldvar) 1617 if errcount > maxErrCount { 1618 break 1619 } 1620 } 1621 } 1622 1623 func (v *Variable) readComplex(size int64) { 1624 var fs int64 1625 switch size { 1626 case 8: 1627 fs = 4 1628 case 16: 1629 fs = 8 1630 default: 1631 v.Unreadable = fmt.Errorf("invalid size (%d) for complex type", size) 1632 return 1633 } 1634 1635 ftyp := &godwarf.FloatType{BasicType: godwarf.BasicType{CommonType: godwarf.CommonType{ByteSize: fs, Name: fmt.Sprintf("float%d", fs)}, BitSize: fs * 8, BitOffset: 0}} 1636 1637 realvar := v.newVariable("real", v.Addr, ftyp, v.mem) 1638 imagvar := v.newVariable("imaginary", v.Addr+uint64(fs), ftyp, v.mem) 1639 realvar.loadValue(loadSingleValue) 1640 imagvar.loadValue(loadSingleValue) 1641 v.Value = constant.BinaryOp(realvar.Value, token.ADD, constant.MakeImag(imagvar.Value)) 1642 } 1643 1644 func (v *Variable) writeComplex(real, imag float64, size int64) error { 1645 err := v.writeFloatRaw(real, int64(size/2)) 1646 if err != nil { 1647 return err 1648 } 1649 imagaddr := *v 1650 imagaddr.Addr += uint64(size / 2) 1651 return imagaddr.writeFloatRaw(imag, int64(size/2)) 1652 } 1653 
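// readIntRaw reads a signed integer of the given byte size from addr, assuming
// little-endian byte order, and sign-extends it to int64.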
func readIntRaw(mem MemoryReadWriter, addr uint64, size int64) (int64, error) {
	var n int64

	val := make([]byte, int(size))
	_, err := mem.ReadMemory(val, addr)
	if err != nil {
		return 0, err
	}

	switch size {
	case 1:
		n = int64(int8(val[0]))
	case 2:
		n = int64(int16(binary.LittleEndian.Uint16(val)))
	case 4:
		n = int64(int32(binary.LittleEndian.Uint32(val)))
	case 8:
		n = int64(binary.LittleEndian.Uint64(val))
	}

	return n, nil
}

func (v *Variable) writeUint(value uint64, size int64) error {
	val := make([]byte, size)

	switch size {
	case 1:
		val[0] = byte(value)
	case 2:
		binary.LittleEndian.PutUint16(val, uint16(value))
	case 4:
		binary.LittleEndian.PutUint32(val, uint32(value))
	case 8:
		binary.LittleEndian.PutUint64(val, uint64(value))
	}

	_, err := v.mem.WriteMemory(v.Addr, val)
	return err
}

func readUintRaw(mem MemoryReadWriter, addr uint64, size int64) (uint64, error) {
	var n uint64

	val := make([]byte, int(size))
	_, err := mem.ReadMemory(val, addr)
	if err != nil {
		return 0, err
	}

	switch size {
	case 1:
		n = uint64(val[0])
	case 2:
		n = uint64(binary.LittleEndian.Uint16(val))
	case 4:
		n = uint64(binary.LittleEndian.Uint32(val))
	case 8:
		n = uint64(binary.LittleEndian.Uint64(val))
	}

	return n, nil
}

func (v *Variable) readFloatRaw(size int64) (float64, error) {
	val := make([]byte, int(size))
	_, err := v.mem.ReadMemory(val, v.Addr)
	if err != nil {
		return 0.0, err
	}
	buf := bytes.NewBuffer(val)

	switch size {
	case 4:
		n := float32(0)
		binary.Read(buf, binary.LittleEndian, &n)
		return float64(n), nil
	case 8:
		n := float64(0)
		binary.Read(buf, binary.LittleEndian, &n)
		return n, nil
	}

	return 0.0, fmt.Errorf("could not read float")
}

func (v *Variable) writeFloatRaw(f float64, size int64) error {
	buf := bytes.NewBuffer(make([]byte, 0, size))

	switch size {
	case 4:
		n := float32(f)
		binary.Write(buf, binary.LittleEndian, n)
	case 8:
		n := float64(f)
		binary.Write(buf, binary.LittleEndian, n)
	}

	_, err := v.mem.WriteMemory(v.Addr, buf.Bytes())
	return err
}

func (v *Variable) writeBool(value bool) error {
	val := []byte{0}
	val[0] = *(*byte)(unsafe.Pointer(&value))
	_, err := v.mem.WriteMemory(v.Addr, val)
	return err
}

func (v *Variable) writeZero() error {
	val := make([]byte, v.RealType.Size())
	_, err := v.mem.WriteMemory(v.Addr, val)
	return err
}

// writeEmptyInterface writes the empty interface of type typeAddr and data as the data field.
1770 func (v *Variable) writeEmptyInterface(typeAddr uint64, data *Variable) error { 1771 dstType, dstData, _ := v.readInterface() 1772 if v.Unreadable != nil { 1773 return v.Unreadable 1774 } 1775 dstType.writeUint(typeAddr, dstType.RealType.Size()) 1776 dstData.writeCopy(data) 1777 return nil 1778 } 1779 1780 func (v *Variable) writeSlice(len, cap int64, base uint64) error { 1781 for _, f := range v.RealType.(*godwarf.SliceType).Field { 1782 switch f.Name { 1783 case sliceArrayFieldName: 1784 arrv, _ := v.toField(f) 1785 if err := arrv.writeUint(uint64(base), arrv.RealType.Size()); err != nil { 1786 return err 1787 } 1788 case sliceLenFieldName: 1789 lenv, _ := v.toField(f) 1790 if err := lenv.writeUint(uint64(len), lenv.RealType.Size()); err != nil { 1791 return err 1792 } 1793 case sliceCapFieldName: 1794 capv, _ := v.toField(f) 1795 if err := capv.writeUint(uint64(cap), capv.RealType.Size()); err != nil { 1796 return err 1797 } 1798 } 1799 } 1800 return nil 1801 } 1802 1803 func (v *Variable) writeString(len, base uint64) error { 1804 writePointer(v.bi, v.mem, uint64(v.Addr), base) 1805 writePointer(v.bi, v.mem, uint64(v.Addr)+uint64(v.bi.Arch.PtrSize()), len) 1806 return nil 1807 } 1808 1809 func (v *Variable) writeCopy(srcv *Variable) error { 1810 buf := make([]byte, srcv.RealType.Size()) 1811 _, err := srcv.mem.ReadMemory(buf, srcv.Addr) 1812 if err != nil { 1813 return err 1814 } 1815 _, err = v.mem.WriteMemory(v.Addr, buf) 1816 return err 1817 } 1818 1819 func (v *Variable) readFunctionPtr() { 1820 // dereference pointer to find function pc 1821 v.closureAddr = v.funcvalAddr() 1822 if v.Unreadable != nil { 1823 return 1824 } 1825 if v.closureAddr == 0 { 1826 v.Base = 0 1827 v.Value = constant.MakeString("") 1828 return 1829 } 1830 1831 val, err := readUintRaw(v.mem, v.closureAddr, int64(v.bi.Arch.PtrSize())) 1832 if err != nil { 1833 v.Unreadable = err 1834 return 1835 } 1836 1837 v.Base = val 1838 fn := v.bi.PCToFunc(uint64(v.Base)) 1839 if fn == nil { 1840 v.Unreadable = fmt.Errorf("could not find function for %#v", v.Base) 1841 return 1842 } 1843 1844 v.Value = constant.MakeString(fn.Name) 1845 } 1846 1847 // funcvalAddr reads the address of the funcval contained in a function variable. 
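// It returns 0 and records the error in v.Unreadable if the read fails.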
1848 func (v *Variable) funcvalAddr() uint64 { 1849 val, err := readUintRaw(v.mem, v.Addr, int64(v.bi.Arch.PtrSize())) 1850 if err != nil { 1851 v.Unreadable = err 1852 return 0 1853 } 1854 return val 1855 } 1856 1857 func (v *Variable) loadMap(recurseLevel int, cfg LoadConfig) { 1858 it := v.mapIterator() 1859 if it == nil { 1860 return 1861 } 1862 it.maxNumBuckets = uint64(cfg.MaxMapBuckets) 1863 1864 if v.Len == 0 || int64(v.mapSkip) >= v.Len || cfg.MaxArrayValues == 0 { 1865 return 1866 } 1867 1868 for skip := 0; skip < v.mapSkip; skip++ { 1869 if ok := it.next(); !ok { 1870 v.Unreadable = fmt.Errorf("map index out of bounds") 1871 return 1872 } 1873 } 1874 1875 count := 0 1876 errcount := 0 1877 for it.next() { 1878 key := it.key() 1879 var val *Variable 1880 if it.values.fieldType.Size() > 0 { 1881 val = it.value() 1882 } else { 1883 val = v.newVariable("", it.values.Addr, it.values.fieldType, DereferenceMemory(v.mem)) 1884 } 1885 key.loadValueInternal(recurseLevel+1, cfg) 1886 val.loadValueInternal(recurseLevel+1, cfg) 1887 if key.Unreadable != nil || val.Unreadable != nil { 1888 errcount++ 1889 } 1890 v.Children = append(v.Children, *key, *val) 1891 count++ 1892 if errcount > maxErrCount { 1893 break 1894 } 1895 if count >= cfg.MaxArrayValues || int64(count) >= v.Len { 1896 break 1897 } 1898 } 1899 } 1900 1901 type mapIterator struct { 1902 v *Variable 1903 numbuckets uint64 1904 oldmask uint64 1905 buckets *Variable 1906 oldbuckets *Variable 1907 b *Variable 1908 bidx uint64 1909 1910 tophashes *Variable 1911 keys *Variable 1912 values *Variable 1913 overflow *Variable 1914 1915 maxNumBuckets uint64 // maximum number of buckets to scan 1916 1917 idx int64 1918 1919 hashTophashEmptyOne uint64 // Go 1.12 and later has two sentinel tophash values for an empty cell, this is the second one (the first one hashTophashEmptyZero, the same as Go 1.11 and earlier) 1920 hashMinTopHash uint64 // minimum value of tophash for a cell that isn't either evacuated or empty 1921 } 1922 1923 // Code derived from go/src/runtime/hashmap.go 1924 func (v *Variable) mapIterator() *mapIterator { 1925 sv := v.clone() 1926 sv.RealType = resolveTypedef(&(sv.RealType.(*godwarf.MapType).TypedefType)) 1927 sv = sv.maybeDereference() 1928 v.Base = sv.Addr 1929 1930 maptype, ok := sv.RealType.(*godwarf.StructType) 1931 if !ok { 1932 v.Unreadable = fmt.Errorf("wrong real type for map") 1933 return nil 1934 } 1935 1936 it := &mapIterator{v: v, bidx: 0, b: nil, idx: 0} 1937 1938 if sv.Addr == 0 { 1939 it.numbuckets = 0 1940 return it 1941 } 1942 1943 v.mem = cacheMemory(v.mem, v.Base, int(v.RealType.Size())) 1944 1945 for _, f := range maptype.Field { 1946 var err error 1947 field, _ := sv.toField(f) 1948 switch f.Name { 1949 case "count": // +rtype -fieldof hmap int 1950 v.Len, err = field.asInt() 1951 case "B": // +rtype -fieldof hmap uint8 1952 var b uint64 1953 b, err = field.asUint() 1954 it.numbuckets = 1 << b 1955 it.oldmask = (1 << (b - 1)) - 1 1956 case "buckets": // +rtype -fieldof hmap unsafe.Pointer 1957 it.buckets = field.maybeDereference() 1958 case "oldbuckets": // +rtype -fieldof hmap unsafe.Pointer 1959 it.oldbuckets = field.maybeDereference() 1960 } 1961 if err != nil { 1962 v.Unreadable = err 1963 return nil 1964 } 1965 } 1966 1967 if it.buckets.Kind != reflect.Struct || it.oldbuckets.Kind != reflect.Struct { 1968 v.Unreadable = errMapBucketsNotStruct 1969 return nil 1970 } 1971 1972 it.hashTophashEmptyOne = hashTophashEmptyZero 1973 it.hashMinTopHash = hashMinTopHashGo111 1974 if producer := 
v.bi.Producer(); producer != "" && goversion.ProducerAfterOrEqual(producer, 1, 12) { 1975 it.hashTophashEmptyOne = hashTophashEmptyOne 1976 it.hashMinTopHash = hashMinTopHashGo112 1977 } 1978 1979 return it 1980 } 1981 1982 var errMapBucketContentsNotArray = errors.New("malformed map type: keys, values or tophash of a bucket is not an array") 1983 var errMapBucketContentsInconsistentLen = errors.New("malformed map type: inconsistent array length in bucket") 1984 var errMapBucketsNotStruct = errors.New("malformed map type: buckets, oldbuckets or overflow field not a struct") 1985 1986 func (it *mapIterator) nextBucket() bool { 1987 if it.overflow != nil && it.overflow.Addr > 0 { 1988 it.b = it.overflow 1989 } else { 1990 it.b = nil 1991 1992 if it.maxNumBuckets > 0 && it.bidx >= it.maxNumBuckets { 1993 return false 1994 } 1995 1996 for it.bidx < it.numbuckets { 1997 it.b = it.buckets.clone() 1998 it.b.Addr += uint64(it.buckets.DwarfType.Size()) * it.bidx 1999 2000 if it.oldbuckets.Addr <= 0 { 2001 break 2002 } 2003 2004 // if oldbuckets is not nil we are iterating through a map that is in 2005 // the middle of a grow. 2006 // if the bucket we are looking at hasn't been filled in we iterate 2007 // instead through its corresponding "oldbucket" (i.e. the bucket the 2008 // elements of this bucket are coming from) but only if this is the first 2009 // of the two buckets being created from the same oldbucket (otherwise we 2010 // would print some keys twice) 2011 2012 oldbidx := it.bidx & it.oldmask 2013 oldb := it.oldbuckets.clone() 2014 oldb.Addr += uint64(it.oldbuckets.DwarfType.Size()) * oldbidx 2015 2016 if it.mapEvacuated(oldb) { 2017 break 2018 } 2019 2020 if oldbidx == it.bidx { 2021 it.b = oldb 2022 break 2023 } 2024 2025 // oldbucket origin for current bucket has not been evacuated but we have already 2026 // iterated over it so we should just skip it 2027 it.b = nil 2028 it.bidx++ 2029 } 2030 2031 if it.b == nil { 2032 return false 2033 } 2034 it.bidx++ 2035 } 2036 2037 if it.b.Addr <= 0 { 2038 return false 2039 } 2040 2041 it.b.mem = cacheMemory(it.b.mem, it.b.Addr, int(it.b.RealType.Size())) 2042 2043 it.tophashes = nil 2044 it.keys = nil 2045 it.values = nil 2046 it.overflow = nil 2047 2048 for _, f := range it.b.DwarfType.(*godwarf.StructType).Field { 2049 field, err := it.b.toField(f) 2050 if err != nil { 2051 it.v.Unreadable = err 2052 return false 2053 } 2054 if field.Unreadable != nil { 2055 it.v.Unreadable = field.Unreadable 2056 return false 2057 } 2058 2059 switch f.Name { 2060 case "tophash": // +rtype -fieldof bmap [8]uint8 2061 it.tophashes = field 2062 case "keys": 2063 it.keys = field 2064 case "values": 2065 it.values = field 2066 case "overflow": 2067 it.overflow = field.maybeDereference() 2068 } 2069 } 2070 2071 // sanity checks 2072 if it.tophashes == nil || it.keys == nil || it.values == nil { 2073 it.v.Unreadable = fmt.Errorf("malformed map type") 2074 return false 2075 } 2076 2077 if it.tophashes.Kind != reflect.Array || it.keys.Kind != reflect.Array || it.values.Kind != reflect.Array { 2078 it.v.Unreadable = errMapBucketContentsNotArray 2079 return false 2080 } 2081 2082 if it.tophashes.Len != it.keys.Len { 2083 it.v.Unreadable = errMapBucketContentsInconsistentLen 2084 return false 2085 } 2086 2087 if it.values.fieldType.Size() > 0 && it.tophashes.Len != it.values.Len { 2088 // if the type of the value is zero-sized (i.e. struct{}) then the values 2089 // array's length is zero. 
2090 it.v.Unreadable = errMapBucketContentsInconsistentLen 2091 return false 2092 } 2093 2094 if it.overflow.Kind != reflect.Struct { 2095 it.v.Unreadable = errMapBucketsNotStruct 2096 return false 2097 } 2098 2099 return true 2100 } 2101 2102 func (it *mapIterator) next() bool { 2103 for { 2104 if it.b == nil || it.idx >= it.tophashes.Len { 2105 r := it.nextBucket() 2106 if !r { 2107 return false 2108 } 2109 it.idx = 0 2110 } 2111 tophash, _ := it.tophashes.sliceAccess(int(it.idx)) 2112 h, err := tophash.asUint() 2113 if err != nil { 2114 it.v.Unreadable = fmt.Errorf("unreadable tophash: %v", err) 2115 return false 2116 } 2117 it.idx++ 2118 if h != hashTophashEmptyZero && h != it.hashTophashEmptyOne { 2119 return true 2120 } 2121 } 2122 } 2123 2124 func (it *mapIterator) key() *Variable { 2125 k, _ := it.keys.sliceAccess(int(it.idx - 1)) 2126 return k 2127 } 2128 2129 func (it *mapIterator) value() *Variable { 2130 v, _ := it.values.sliceAccess(int(it.idx - 1)) 2131 return v 2132 } 2133 2134 func (it *mapIterator) mapEvacuated(b *Variable) bool { 2135 if b.Addr == 0 { 2136 return true 2137 } 2138 for _, f := range b.DwarfType.(*godwarf.StructType).Field { 2139 if f.Name != "tophash" { 2140 continue 2141 } 2142 tophashes, _ := b.toField(f) 2143 tophash0var, _ := tophashes.sliceAccess(0) 2144 tophash0, err := tophash0var.asUint() 2145 if err != nil { 2146 return true 2147 } 2148 //TODO: this needs to be > hashTophashEmptyOne for go >= 1.12 2149 return tophash0 > it.hashTophashEmptyOne && tophash0 < it.hashMinTopHash 2150 } 2151 return true 2152 } 2153 2154 func (v *Variable) readInterface() (_type, data *Variable, isnil bool) { 2155 // An interface variable is implemented either by a runtime.iface 2156 // struct or a runtime.eface struct. The difference being that empty 2157 // interfaces (i.e. "interface {}") are represented by runtime.eface 2158 // and non-empty interfaces by runtime.iface. 2159 // 2160 // For both runtime.ifaces and runtime.efaces the data is stored in v.data 2161 // 2162 // The concrete type however is stored in v.tab._type for non-empty 2163 // interfaces and in v._type for empty interfaces. 2164 // 2165 // For nil empty interface variables _type will be nil, for nil 2166 // non-empty interface variables tab will be nil 2167 // 2168 // In either case the _type field is a pointer to a runtime._type struct. 2169 // 2170 // The following code works for both runtime.iface and runtime.eface. 
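	// Editor's note (illustrative sketch, not part of the original file): the
	// two runtime layouts this loop relies on look roughly like this, with
	// field names matching the +rtype annotations below; these are simplified
	// stand-ins, not the real runtime declarations:
	//
	//	type iface struct { // non-empty interfaces
	//		tab  *itab          // itab whose _type field points to the concrete runtime._type
	//		data unsafe.Pointer // concrete value, stored directly if it is pointer-shaped
	//	}
	//	type eface struct { // empty interfaces, i.e. interface{}
	//		_type *_type        // concrete runtime._type, nil for a nil interface
	//		data  unsafe.Pointer
	//	}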
2171 2172 v.mem = cacheMemory(v.mem, v.Addr, int(v.RealType.Size())) 2173 2174 ityp := resolveTypedef(&v.RealType.(*godwarf.InterfaceType).TypedefType).(*godwarf.StructType) 2175 2176 // +rtype -field iface.tab *itab 2177 // +rtype -field iface.data unsafe.Pointer 2178 // +rtype -field eface._type *_type 2179 // +rtype -field eface.data unsafe.Pointer 2180 2181 for _, f := range ityp.Field { 2182 switch f.Name { 2183 case "tab": // for runtime.iface 2184 tab, _ := v.toField(f) // +rtype *itab 2185 tab = tab.maybeDereference() 2186 isnil = tab.Addr == 0 2187 if !isnil { 2188 var err error 2189 _type, err = tab.structMember("_type") // +rtype *_type 2190 if err != nil { 2191 v.Unreadable = fmt.Errorf("invalid interface type: %v", err) 2192 return 2193 } 2194 } 2195 case "_type": // for runtime.eface 2196 _type, _ = v.toField(f) 2197 isnil = _type.maybeDereference().Addr == 0 2198 case "data": 2199 data, _ = v.toField(f) 2200 } 2201 } 2202 return 2203 } 2204 2205 func (v *Variable) loadInterface(recurseLevel int, loadData bool, cfg LoadConfig) { 2206 _type, data, isnil := v.readInterface() 2207 2208 if isnil { 2209 // interface to nil 2210 data = data.maybeDereference() 2211 v.Children = []Variable{*data} 2212 if loadData { 2213 v.Children[0].loadValueInternal(recurseLevel, cfg) 2214 } 2215 return 2216 } 2217 2218 if data == nil { 2219 v.Unreadable = fmt.Errorf("invalid interface type") 2220 return 2221 } 2222 2223 typ, kind, err := runtimeTypeToDIE(_type, data.Addr) 2224 if err != nil { 2225 v.Unreadable = err 2226 return 2227 } 2228 2229 deref := false 2230 if kind&kindDirectIface == 0 { 2231 realtyp := resolveTypedef(typ) 2232 if _, isptr := realtyp.(*godwarf.PtrType); !isptr { 2233 typ = pointerTo(typ, v.bi.Arch) 2234 deref = true 2235 } 2236 } 2237 2238 data = data.newVariable("data", data.Addr, typ, data.mem) 2239 if deref { 2240 data = data.maybeDereference() 2241 data.Name = "data" 2242 } 2243 2244 v.Children = []Variable{*data} 2245 if loadData && recurseLevel <= cfg.MaxVariableRecurse { 2246 v.Children[0].loadValueInternal(recurseLevel, cfg) 2247 } else { 2248 v.Children[0].OnlyAddr = true 2249 } 2250 } 2251 2252 // ConstDescr describes the value of v using constants. 2253 func (v *Variable) ConstDescr() string { 2254 if v.bi == nil || (v.Flags&VariableConstant != 0) { 2255 return "" 2256 } 2257 ctyp := v.bi.consts.Get(v.DwarfType) 2258 if ctyp == nil { 2259 return "" 2260 } 2261 if typename := v.DwarfType.Common().Name; !strings.Contains(typename, ".") || strings.HasPrefix(typename, "C.") { 2262 // only attempt to use constants for user defined type, otherwise every 2263 // int variable with value 1 will be described with os.SEEK_CUR and other 2264 // similar problems. 
2265 return "" 2266 } 2267 2268 switch v.Kind { 2269 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: 2270 fallthrough 2271 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: 2272 n, _ := constant.Int64Val(v.Value) 2273 return ctyp.describe(n) 2274 } 2275 return "" 2276 } 2277 2278 // registerVariableTypeConv implements type conversions for CPU register variables (REGNAME.int8, etc) 2279 func (v *Variable) registerVariableTypeConv(newtyp string) (*Variable, error) { 2280 var n int = 0 2281 for i := 0; i < len(v.reg.Bytes); i += n { 2282 var child *Variable 2283 switch newtyp { 2284 case "int8": 2285 child = newConstant(constant.MakeInt64(int64(int8(v.reg.Bytes[i]))), v.mem) 2286 child.Kind = reflect.Int8 2287 n = 1 2288 case "int16": 2289 child = newConstant(constant.MakeInt64(int64(int16(binary.LittleEndian.Uint16(v.reg.Bytes[i:])))), v.mem) 2290 child.Kind = reflect.Int16 2291 n = 2 2292 case "int32": 2293 child = newConstant(constant.MakeInt64(int64(int32(binary.LittleEndian.Uint32(v.reg.Bytes[i:])))), v.mem) 2294 child.Kind = reflect.Int32 2295 n = 4 2296 case "int64": 2297 child = newConstant(constant.MakeInt64(int64(binary.LittleEndian.Uint64(v.reg.Bytes[i:]))), v.mem) 2298 child.Kind = reflect.Int64 2299 n = 8 2300 case "uint8": 2301 child = newConstant(constant.MakeUint64(uint64(v.reg.Bytes[i])), v.mem) 2302 child.Kind = reflect.Uint8 2303 n = 1 2304 case "uint16": 2305 child = newConstant(constant.MakeUint64(uint64(binary.LittleEndian.Uint16(v.reg.Bytes[i:]))), v.mem) 2306 child.Kind = reflect.Uint16 2307 n = 2 2308 case "uint32": 2309 child = newConstant(constant.MakeUint64(uint64(binary.LittleEndian.Uint32(v.reg.Bytes[i:]))), v.mem) 2310 child.Kind = reflect.Uint32 2311 n = 4 2312 case "uint64": 2313 child = newConstant(constant.MakeUint64(uint64(binary.LittleEndian.Uint64(v.reg.Bytes[i:]))), v.mem) 2314 child.Kind = reflect.Uint64 2315 n = 8 2316 case "float32": 2317 a := binary.LittleEndian.Uint32(v.reg.Bytes[i:]) 2318 x := *(*float32)(unsafe.Pointer(&a)) 2319 child = newConstant(constant.MakeFloat64(float64(x)), v.mem) 2320 child.Kind = reflect.Float32 2321 n = 4 2322 case "float64": 2323 a := binary.LittleEndian.Uint64(v.reg.Bytes[i:]) 2324 x := *(*float64)(unsafe.Pointer(&a)) 2325 child = newConstant(constant.MakeFloat64(x), v.mem) 2326 child.Kind = reflect.Float64 2327 n = 8 2328 default: 2329 if n == 0 { 2330 for _, pfx := range []string{"uint", "int"} { 2331 if strings.HasPrefix(newtyp, pfx) { 2332 n, _ = strconv.Atoi(newtyp[len(pfx):]) 2333 break 2334 } 2335 } 2336 if n == 0 || popcnt(uint64(n)) != 1 { 2337 return nil, fmt.Errorf("unknown CPU register type conversion to %q", newtyp) 2338 } 2339 n = n / 8 2340 } 2341 child = newConstant(constant.MakeString(fmt.Sprintf("%x", v.reg.Bytes[i:][:n])), v.mem) 2342 } 2343 v.Children = append(v.Children, *child) 2344 } 2345 2346 v.loaded = true 2347 v.Kind = reflect.Array 2348 v.Len = int64(len(v.Children)) 2349 v.Base = fakeAddressUnresolv 2350 v.DwarfType = fakeArrayType(uint64(len(v.Children)), &godwarf.VoidType{CommonType: godwarf.CommonType{ByteSize: int64(n)}}) 2351 v.RealType = v.DwarfType 2352 return v, nil 2353 } 2354 2355 // popcnt is the number of bits set to 1 in x. 2356 // It's the same as math/bits.OnesCount64, copied here so that we can build 2357 // on versions of go that don't have math/bits. 2358 func popcnt(x uint64) int { 2359 const m0 = 0x5555555555555555 // 01010101 ... 2360 const m1 = 0x3333333333333333 // 00110011 ... 
2361 const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... 2362 const m = 1<<64 - 1 2363 x = x>>1&(m0&m) + x&(m0&m) 2364 x = x>>2&(m1&m) + x&(m1&m) 2365 x = (x>>4 + x) & (m2 & m) 2366 x += x >> 8 2367 x += x >> 16 2368 x += x >> 32 2369 return int(x) & (1<<7 - 1) 2370 } 2371 2372 func isCgoType(bi *BinaryInfo, typ godwarf.Type) bool { 2373 cu := bi.Images[typ.Common().Index].findCompileUnitForOffset(typ.Common().Offset) 2374 if cu == nil { 2375 return false 2376 } 2377 return !cu.isgo 2378 } 2379 2380 func isCgoCharPtr(bi *BinaryInfo, typ *godwarf.PtrType) bool { 2381 if !isCgoType(bi, typ) { 2382 return false 2383 } 2384 2385 fieldtyp := typ.Type 2386 resolveQualTypedef: 2387 for { 2388 switch t := fieldtyp.(type) { 2389 case *godwarf.QualType: 2390 fieldtyp = t.Type 2391 case *godwarf.TypedefType: 2392 fieldtyp = t.Type 2393 default: 2394 break resolveQualTypedef 2395 } 2396 } 2397 2398 _, ischar := fieldtyp.(*godwarf.CharType) 2399 _, isuchar := fieldtyp.(*godwarf.UcharType) 2400 return ischar || isuchar 2401 } 2402 2403 func (cm constantsMap) Get(typ godwarf.Type) *constantType { 2404 ctyp := cm[dwarfRef{typ.Common().Index, typ.Common().Offset}] 2405 if ctyp == nil { 2406 return nil 2407 } 2408 typepkg := packageName(typ.String()) + "." 2409 if !ctyp.initialized { 2410 ctyp.initialized = true 2411 sort.Sort(constantValuesByValue(ctyp.values)) 2412 for i := range ctyp.values { 2413 ctyp.values[i].name = strings.TrimPrefix(ctyp.values[i].name, typepkg) 2414 if popcnt(uint64(ctyp.values[i].value)) == 1 { 2415 ctyp.values[i].singleBit = true 2416 } 2417 } 2418 } 2419 return ctyp 2420 } 2421 2422 func (ctyp *constantType) describe(n int64) string { 2423 for _, val := range ctyp.values { 2424 if val.value == n { 2425 return val.name 2426 } 2427 } 2428 2429 if n == 0 { 2430 return "" 2431 } 2432 2433 // If all the values for this constant only have one bit set we try to 2434 // represent the value as a bitwise or of constants. 
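	// Editor's note (hypothetical worked example): for a flags-like type with
	// constants A = 1, B = 2 and C = 4, describe(5) finds no exact match,
	// peels off the single-bit values A and C and returns "A|C", while
	// describe(9) is left with a non-zero remainder (8) and returns "".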
2435 2436 fields := []string{} 2437 for _, val := range ctyp.values { 2438 if !val.singleBit { 2439 continue 2440 } 2441 if n&val.value != 0 { 2442 fields = append(fields, val.name) 2443 n = n & ^val.value 2444 } 2445 } 2446 if n == 0 { 2447 return strings.Join(fields, "|") 2448 } 2449 return "" 2450 } 2451 2452 type variablesByDepthAndDeclLine struct { 2453 vars []*Variable 2454 depths []int 2455 } 2456 2457 func (v *variablesByDepthAndDeclLine) Len() int { return len(v.vars) } 2458 2459 func (v *variablesByDepthAndDeclLine) Less(i int, j int) bool { 2460 if v.depths[i] == v.depths[j] { 2461 return v.vars[i].DeclLine < v.vars[j].DeclLine 2462 } 2463 return v.depths[i] < v.depths[j] 2464 } 2465 2466 func (v *variablesByDepthAndDeclLine) Swap(i int, j int) { 2467 v.depths[i], v.depths[j] = v.depths[j], v.depths[i] 2468 v.vars[i], v.vars[j] = v.vars[j], v.vars[i] 2469 } 2470 2471 type constantValuesByValue []constantValue 2472 2473 func (v constantValuesByValue) Len() int { return len(v) } 2474 func (v constantValuesByValue) Less(i int, j int) bool { return v[i].value < v[j].value } 2475 func (v constantValuesByValue) Swap(i int, j int) { v[i], v[j] = v[j], v[i] } 2476 2477 const ( 2478 timeTimeWallHasMonotonicBit uint64 = (1 << 63) // hasMonotonic bit of time.Time.wall 2479 2480 //lint:ignore ST1011 addSeconds is the name of the relevant function 2481 maxAddSeconds time.Duration = (time.Duration(^uint64(0)>>1) / time.Second) * time.Second // maximum number of seconds that can be added with (time.Time).Add, measured in nanoseconds 2482 2483 wallNsecShift = 30 // size of the nanoseconds field of time.Time.wall 2484 2485 unixTimestampOfWallEpoch = -2682288000 // number of seconds between the unix epoch and the epoch for time.Time.wall (1 jan 1885) 2486 ) 2487 2488 // formatTime writes formatted value of a time.Time to v.Value. 2489 // See $GOROOT/src/time/time.go for a description of time.Time internals. 
2490 func (v *Variable) formatTime() { 2491 wallv := v.fieldVariable("wall") 2492 extv := v.fieldVariable("ext") 2493 if wallv == nil || extv == nil || wallv.Unreadable != nil || extv.Unreadable != nil || wallv.Value == nil || extv.Value == nil { 2494 return 2495 } 2496 2497 var loc *time.Location 2498 2499 locv := v.fieldVariable("loc") 2500 if locv != nil && locv.Unreadable == nil { 2501 namev := locv.loadFieldNamed("name") 2502 if namev != nil && namev.Unreadable == nil { 2503 name := constant.StringVal(namev.Value) 2504 loc, _ = time.LoadLocation(name) 2505 } 2506 } 2507 2508 wall, _ := constant.Uint64Val(wallv.Value) 2509 ext, _ := constant.Int64Val(extv.Value) 2510 2511 hasMonotonic := (wall & timeTimeWallHasMonotonicBit) != 0 2512 if hasMonotonic { 2513 // the 33-bit field of wall holds a 33-bit unsigned wall 2514 // seconds since Jan 1 year 1885, and ext holds a signed 64-bit monotonic 2515 // clock reading, nanoseconds since process start 2516 sec := int64(wall << 1 >> (wallNsecShift + 1)) // seconds since 1 Jan 1885 2517 t := time.Unix(sec+unixTimestampOfWallEpoch, 0).UTC() 2518 if loc != nil { 2519 t = t.In(loc) 2520 } 2521 v.Value = constant.MakeString(fmt.Sprintf("%s, %+d", t.Format(time.RFC3339), ext)) 2522 } else { 2523 // the full signed 64-bit wall seconds since Jan 1 year 1 is stored in ext 2524 var t time.Time 2525 for ext > int64(maxAddSeconds/time.Second) { 2526 t = t.Add(maxAddSeconds) 2527 ext -= int64(maxAddSeconds / time.Second) 2528 } 2529 t = t.Add(time.Duration(ext) * time.Second) 2530 if loc != nil { 2531 t = t.In(loc) 2532 } 2533 v.Value = constant.MakeString(t.Format(time.RFC3339)) 2534 } 2535 }
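// Editor's note: the sketch below is an illustrative addition, not part of
// the original file, and decodeWall is a hypothetical name. It isolates the
// wall-field decoding that formatTime performs above, following the layout
// documented in $GOROOT/src/time/time.go: bit 63 is the hasMonotonic flag
// and, when it is set, bits 30-62 hold an unsigned 33-bit seconds count
// since Jan 1 year 1885 while bits 0-29 hold nanoseconds.
func decodeWall(wall uint64) (sec, nsec int64, hasMonotonic bool) {
	const (
		hasMonotonicBit = uint64(1) << 63
		nsecShift       = 30
		nsecMask        = 1<<nsecShift - 1
	)
	hasMonotonic = wall&hasMonotonicBit != 0
	if hasMonotonic {
		sec = int64(wall << 1 >> (nsecShift + 1)) // drop the flag bit, then the nanosecond bits
		nsec = int64(wall & nsecMask)
	}
	return sec, nsec, hasMonotonic
}

// Combined with unixTimestampOfWallEpoch above,
// time.Unix(sec+unixTimestampOfWallEpoch, nsec) recovers the wall-clock
// reading that formatTime prints in the monotonic case.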
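// Editor's note: another illustrative addition (hypothetical helper, not part
// of the original file). It shows the reinterpretation that
// registerVariableTypeConv performs for its "uint16" case above: the raw
// register bytes are decoded little-endian into fixed-width lanes, producing
// one child value per lane.
func splitRegisterUint16(raw []byte) []uint64 {
	lanes := make([]uint64, 0, len(raw)/2)
	for i := 0; i+2 <= len(raw); i += 2 {
		lanes = append(lanes, uint64(binary.LittleEndian.Uint16(raw[i:])))
	}
	return lanes
}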