github.com/undoio/delve@v1.9.0/pkg/proc/fncall.go

package proc

import (
	"debug/dwarf"
	"encoding/binary"
	"errors"
	"fmt"
	"go/ast"
	"go/constant"
	"go/token"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"github.com/undoio/delve/pkg/dwarf/godwarf"
	"github.com/undoio/delve/pkg/dwarf/op"
	"github.com/undoio/delve/pkg/dwarf/reader"
	"github.com/undoio/delve/pkg/dwarf/regnum"
	"github.com/undoio/delve/pkg/goversion"
	"github.com/undoio/delve/pkg/logflags"
)

// This file implements the function call injection introduced in go1.11.
//
// The protocol is described in $GOROOT/src/runtime/asm_amd64.s in the
// comments for function runtime·debugCallV1.
//
// The main entry point is EvalExpressionWithCalls which will start a goroutine to
// evaluate the provided expression.
// This goroutine can either return immediately, if no function calls were
// needed, or write a continue request to the scope.callCtx.continueRequest
// channel. When this happens EvalExpressionWithCalls will call Continue and
// return.
//
// The Continue loop will write to scope.callCtx.continueCompleted when it
// hits a breakpoint in the call injection protocol.
//
// The work of setting up the function call and executing the protocol is
// done by evalFunctionCall and funcCallStep.

const (
	debugCallFunctionNamePrefix1 = "debugCall"
	debugCallFunctionNamePrefix2 = "runtime.debugCall"
	maxDebugCallVersion          = 2
	maxArgFrameSize              = 65535
)

var (
	errFuncCallUnsupported        = errors.New("function calls not supported by this version of Go")
	errFuncCallUnsupportedBackend = errors.New("backend does not support function calls")
	errFuncCallInProgress         = errors.New("cannot call function while another function call is already in progress")
	errNoGoroutine                = errors.New("no goroutine selected")
	errGoroutineNotRunning        = errors.New("selected goroutine not running")
	errNotEnoughStack             = errors.New("not enough stack space")
	errTooManyArguments           = errors.New("too many arguments")
	errNotEnoughArguments         = errors.New("not enough arguments")
	errNotAGoFunction             = errors.New("not a Go function")
	errFuncCallNotAllowed         = errors.New("function calls not allowed without using 'call'")
	errFuncCallNotAllowedStrAlloc = errors.New("literal string can not be allocated because function calls are not allowed without using 'call'")
)

type functionCallState struct {
	// savedRegs contains the saved registers
	savedRegs Registers
	// err contains a saved error
	err error
	// expr is the expression being evaluated
	expr *ast.CallExpr
	// fn is the function that is being called
	fn *Function
	// receiver is the receiver argument for the function
	receiver *Variable
	// closureAddr is the address of the closure being called
	closureAddr uint64
	// formalArgs are the formal arguments of fn
	formalArgs []funcCallArg
	// argFrameSize contains the size of the arguments
	argFrameSize int64
	// retvars contains the return variables after the function call terminates without panic'ing
	retvars []*Variable
	// panicvar is a variable used to store the value of the panic, if the
	// called function panics.
	panicvar *Variable
	// lateCallFailure is set to true if the function call could not be
	// completed after we started evaluating the arguments.
	lateCallFailure bool
}

type callContext struct {
	p *Target

	// checkEscape is true if the escape check should be performed.
	// See service/api.DebuggerCommand.UnsafeCall in service/api/types.go.
	checkEscape bool

	// retLoadCfg is the load configuration used to load return values
	retLoadCfg LoadConfig

	// Write to continueRequest to request a call to Continue from the
	// debugger's main goroutine.
	// Read from continueCompleted to wait for the target process to stop at
	// one of the interaction points of the function call protocol.
	// To signal that evaluation is completed a value will be written to
	// continueRequest having cont == false and the return values in ret.
	continueRequest   chan<- continueRequest
	continueCompleted <-chan *G

	// injectionThread is the thread to use for nested call injections if the
	// original injection goroutine isn't running (because we are in Go 1.15)
	injectionThread Thread

	// stacks is a slice of known goroutine stacks used to check for
	// inappropriate escapes
	stacks []stack
}

type continueRequest struct {
	cont bool
	err  error
	ret  *Variable
}

type callInjection struct {
	// if continueCompleted is not nil it means we are in the process of
	// executing an injected function call, see comments throughout
	// pkg/proc/fncall.go for a description of how this works.
	continueCompleted chan<- *G
	continueRequest   <-chan continueRequest
	startThreadID     int
	endCallInjection  func()
}

func (callCtx *callContext) doContinue() *G {
	callCtx.continueRequest <- continueRequest{cont: true}
	return <-callCtx.continueCompleted
}

func (callCtx *callContext) doReturn(ret *Variable, err error) {
	if callCtx == nil {
		return
	}
	callCtx.continueRequest <- continueRequest{cont: false, ret: ret, err: err}
}

// EvalExpressionWithCalls is like EvalExpression but allows function calls in 'expr'.
// Because this can only be done in the current goroutine, unlike
// EvalExpression, EvalExpressionWithCalls is not a method of EvalScope.
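//
// A minimal usage sketch follows; the caller is hypothetical and how the
// goroutine and the load configuration are obtained is an assumption, not
// something this file prescribes:
//
//	g, _ := GetG(t.CurrentThread())
//	cfg := LoadConfig{FollowPointers: true, MaxStringLen: 64, MaxArrayValues: 64, MaxStructFields: -1}
//	if err := EvalExpressionWithCalls(t, g, `mypkg.Sum(1, 2)`, cfg, true); err != nil {
//		// handle the error
//	}
//	// on success finishEvalExpressionWithCalls stashes the return values in
//	// g.Thread.Common().returnValues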
func EvalExpressionWithCalls(t *Target, g *G, expr string, retLoadCfg LoadConfig, checkEscape bool) error {
	bi := t.BinInfo()
	if !t.SupportsFunctionCalls() {
		return errFuncCallUnsupportedBackend
	}

	// check that the target goroutine is running
	if g == nil {
		return errNoGoroutine
	}
	if g.Status != Grunning || g.Thread == nil {
		return errGoroutineNotRunning
	}

	if callinj := t.fncallForG[g.ID]; callinj != nil && callinj.continueCompleted != nil {
		return errFuncCallInProgress
	}

	dbgcallfn, _ := debugCallFunction(bi)
	if dbgcallfn == nil {
		return errFuncCallUnsupported
	}

	scope, err := GoroutineScope(t, g.Thread)
	if err != nil {
		return err
	}

	continueRequest := make(chan continueRequest)
	continueCompleted := make(chan *G)

	scope.callCtx = &callContext{
		p:                 t,
		checkEscape:       checkEscape,
		retLoadCfg:        retLoadCfg,
		continueRequest:   continueRequest,
		continueCompleted: continueCompleted,
	}

	endCallInjection, err := t.proc.StartCallInjection()
	if err != nil {
		return err
	}

	t.fncallForG[g.ID] = &callInjection{
		continueCompleted: continueCompleted,
		continueRequest:   continueRequest,
		startThreadID:     0,
		endCallInjection:  endCallInjection,
	}

	go scope.EvalExpression(expr, retLoadCfg)

	contReq, ok := <-continueRequest
	if contReq.cont {
		return t.Continue()
	}

	return finishEvalExpressionWithCalls(t, g, contReq, ok)
}

func finishEvalExpressionWithCalls(t *Target, g *G, contReq continueRequest, ok bool) error {
	fncallLog("stashing return values for %d in thread=%d", g.ID, g.Thread.ThreadID())
	g.Thread.Common().CallReturn = true
	var err error
	if !ok {
		err = errors.New("internal error EvalExpressionWithCalls didn't return anything")
	} else if contReq.err != nil {
		if fpe, ispanic := contReq.err.(fncallPanicErr); ispanic {
			g.Thread.Common().returnValues = []*Variable{fpe.panicVar}
		} else {
			err = contReq.err
		}
	} else if contReq.ret == nil {
		g.Thread.Common().returnValues = nil
	} else if contReq.ret.Addr == 0 && contReq.ret.DwarfType == nil && contReq.ret.Kind == reflect.Invalid {
		// this is a variable returned by a function call with multiple return values
		r := make([]*Variable, len(contReq.ret.Children))
		for i := range contReq.ret.Children {
			r[i] = &contReq.ret.Children[i]
		}
		g.Thread.Common().returnValues = r
	} else {
		g.Thread.Common().returnValues = []*Variable{contReq.ret}
	}

	close(t.fncallForG[g.ID].continueCompleted)
	callinj := t.fncallForG[g.ID]
	for goid := range t.fncallForG {
		if t.fncallForG[goid] == callinj {
			delete(t.fncallForG, goid)
		}
	}
	callinj.endCallInjection()
	return err
}

// evalFunctionCall evaluates a function call.
// If this is a built-in function it's evaluated directly.
// Otherwise this will start the function call injection protocol and
// request that the target process resumes.
// See the comment describing the field EvalScope.callCtx for a description
// of the preconditions that make starting the function call protocol
// possible.
// See runtime.debugCallV1 in $GOROOT/src/runtime/asm_amd64.s for a
// description of the protocol.
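//
// The node argument is the already-parsed call expression. For illustration
// only, a node equivalent to the one the evaluator builds for `f(1, "two")`
// could be obtained with the standard go/parser package (a sketch, not how
// this package constructs its AST):
//
//	expr, _ := parser.ParseExpr(`f(1, "two")`)
//	node := expr.(*ast.CallExpr)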
func evalFunctionCall(scope *EvalScope, node *ast.CallExpr) (*Variable, error) {
	r, err := scope.evalBuiltinCall(node)
	if r != nil || err != nil {
		// it was a builtin call
		return r, err
	}
	if scope.callCtx == nil {
		return nil, errFuncCallNotAllowed
	}
	thread := scope.g.Thread
	stacklo := scope.g.stack.lo
	if thread == nil {
		// We are doing a nested function call and using Go 1.15, the original
		// injection goroutine was suspended and now we are using a different
		// goroutine, evaluation still happened on the original goroutine but we
		// need to use a different thread to do the nested call injection.
		thread = scope.callCtx.injectionThread
		g2, err := GetG(thread)
		if err != nil {
			return nil, err
		}
		stacklo = g2.stack.lo
	}
	if thread == nil {
		return nil, errGoroutineNotRunning
	}

	p := scope.callCtx.p
	bi := scope.BinInfo
	if !p.SupportsFunctionCalls() {
		return nil, errFuncCallUnsupportedBackend
	}

	dbgcallfn, dbgcallversion := debugCallFunction(bi)
	if dbgcallfn == nil {
		return nil, errFuncCallUnsupported
	}

	// check that there are at least 256 bytes free on the stack
	regs, err := thread.Registers()
	if err != nil {
		return nil, err
	}
	regs, err = regs.Copy()
	if err != nil {
		return nil, err
	}
	if regs.SP()-bi.Arch.debugCallMinStackSize <= stacklo {
		return nil, errNotEnoughStack
	}
	protocolReg, ok := debugCallProtocolReg(bi.Arch.Name, dbgcallversion)
	if !ok {
		return nil, errFuncCallUnsupported
	}
	if bi.Arch.RegistersToDwarfRegisters(0, regs).Reg(protocolReg) == nil {
		return nil, errFuncCallUnsupportedBackend
	}

	fncall := functionCallState{
		expr:      node,
		savedRegs: regs,
	}

	err = funcCallEvalFuncExpr(scope, &fncall, false)
	if err != nil {
		return nil, err
	}

	switch bi.Arch.Name {
	case "amd64":
		if err := callOP(bi, thread, regs, dbgcallfn.Entry); err != nil {
			return nil, err
		}
		// write the desired argument frame size at SP-(2*pointer_size) (the extra pointer is the saved PC)
		if err := writePointer(bi, scope.Mem, regs.SP()-3*uint64(bi.Arch.PtrSize()), uint64(fncall.argFrameSize)); err != nil {
			return nil, err
		}
	case "arm64":
		// debugCallV2 on arm64 needs a special call sequence, callOP can not be used
		sp := regs.SP()
		sp -= 2 * uint64(bi.Arch.PtrSize())
		if err := setSP(thread, sp); err != nil {
			return nil, err
		}
		if err := writePointer(bi, scope.Mem, sp, regs.LR()); err != nil {
			return nil, err
		}
		if err := setLR(thread, regs.PC()); err != nil {
			return nil, err
		}
		if err := writePointer(bi, scope.Mem, sp-uint64(2*bi.Arch.PtrSize()), uint64(fncall.argFrameSize)); err != nil {
			return nil, err
		}
		regs, err = thread.Registers()
		if err != nil {
			return nil, err
		}
		regs, err = regs.Copy()
		if err != nil {
			return nil, err
		}
		fncall.savedRegs = regs
		err = setPC(thread, dbgcallfn.Entry)
		if err != nil {
			return nil, err
		}
	}

	fncallLog("function call initiated %v frame size %d goroutine %d (thread %d)", fncall.fn, fncall.argFrameSize, scope.g.ID, thread.ThreadID())

	thread.Breakpoint().Clear() // since we moved address in PC the thread is no longer stopped at a breakpoint, leaving the breakpoint set will confuse Continue
	p.fncallForG[scope.g.ID].startThreadID = thread.ThreadID()

	spoff := int64(scope.Regs.Uint64Val(scope.Regs.SPRegNum)) - int64(scope.g.stack.hi)
	bpoff := int64(scope.Regs.Uint64Val(scope.Regs.BPRegNum)) - int64(scope.g.stack.hi)
	fboff := scope.Regs.FrameBase - int64(scope.g.stack.hi)

	for {
		scope.callCtx.injectionThread = nil
		g := scope.callCtx.doContinue()
		// Go 1.15 will move call injection execution to a different goroutine,
		// but we want to keep evaluation on the original goroutine.
		if g.ID == scope.g.ID {
			scope.g = g
		} else {
			// We are in Go 1.15 and we switched to a new goroutine, the original
			// goroutine is now parked and therefore does not have a thread
			// associated.
			scope.g.Thread = nil
			scope.g.Status = Gwaiting
			scope.callCtx.injectionThread = g.Thread
		}

		// adjust the value of registers inside scope
		pcreg, bpreg, spreg := scope.Regs.Reg(scope.Regs.PCRegNum), scope.Regs.Reg(scope.Regs.BPRegNum), scope.Regs.Reg(scope.Regs.SPRegNum)
		scope.Regs.ClearRegisters()
		scope.Regs.AddReg(scope.Regs.PCRegNum, pcreg)
		scope.Regs.AddReg(scope.Regs.BPRegNum, bpreg)
		scope.Regs.AddReg(scope.Regs.SPRegNum, spreg)
		scope.Regs.Reg(scope.Regs.SPRegNum).Uint64Val = uint64(spoff + int64(scope.g.stack.hi))
		scope.Regs.Reg(scope.Regs.BPRegNum).Uint64Val = uint64(bpoff + int64(scope.g.stack.hi))
		scope.Regs.FrameBase = fboff + int64(scope.g.stack.hi)
		scope.Regs.CFA = scope.frameOffset + int64(scope.g.stack.hi)

		finished := funcCallStep(scope, &fncall, g.Thread, protocolReg, dbgcallfn.Name)
		if finished {
			break
		}
	}

	if fncall.err != nil {
		return nil, fncall.err
	}

	if fncall.panicvar != nil {
		return nil, fncallPanicErr{fncall.panicvar}
	}
	switch len(fncall.retvars) {
	case 0:
		r := newVariable("", 0, nil, scope.BinInfo, nil)
		r.loaded = true
		r.Unreadable = errors.New("no return values")
		return r, nil
	case 1:
		return fncall.retvars[0], nil
	default:
		// create a fake variable without address or type to return multiple values
		r := newVariable("", 0, nil, scope.BinInfo, nil)
		r.loaded = true
		r.Children = make([]Variable, len(fncall.retvars))
		for i := range fncall.retvars {
			r.Children[i] = *fncall.retvars[i]
		}
		return r, nil
	}
}

// fncallPanicErr is the error returned if a called function panics
type fncallPanicErr struct {
	panicVar *Variable
}

func (err fncallPanicErr) Error() string {
	return "panic calling a function"
}

func fncallLog(fmtstr string, args ...interface{}) {
	logflags.FnCallLogger().Infof(fmtstr, args...)
}

// writePointer writes val as an architecture pointer at addr in mem.
func writePointer(bi *BinaryInfo, mem MemoryReadWriter, addr, val uint64) error {
	ptrbuf := make([]byte, bi.Arch.PtrSize())

	// TODO: use target architecture endianness instead of LittleEndian
	switch len(ptrbuf) {
	case 4:
		binary.LittleEndian.PutUint32(ptrbuf, uint32(val))
	case 8:
		binary.LittleEndian.PutUint64(ptrbuf, val)
	default:
		panic(fmt.Errorf("unsupported pointer size %d", len(ptrbuf)))
	}
	_, err := mem.WriteMemory(addr, ptrbuf)
	return err
}

// callOP simulates a call instruction on the given thread:
// * pushes the current value of PC on the stack (adjusting SP)
// * changes the value of PC to callAddr
// Note: regs are NOT updated!
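//
// For example, on amd64 with SP=0xc000046000 and PC=0x401000 (illustrative
// values), callOP(bi, thread, regs, 0x457890) leaves the thread with
// SP=0xc000045ff8, the old PC 0x401000 written at 0xc000045ff8, and
// PC=0x457890; on arm64 the old PC is saved in LR instead of on the stack.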
func callOP(bi *BinaryInfo, thread Thread, regs Registers, callAddr uint64) error {
	switch bi.Arch.Name {
	case "amd64":
		sp := regs.SP()
		// push PC on the stack
		sp -= uint64(bi.Arch.PtrSize())
		if err := setSP(thread, sp); err != nil {
			return err
		}
		if err := writePointer(bi, thread.ProcessMemory(), sp, regs.PC()); err != nil {
			return err
		}
		return setPC(thread, callAddr)
	case "arm64":
		if err := setLR(thread, regs.PC()); err != nil {
			return err
		}
		return setPC(thread, callAddr)
	default:
		panic("not implemented")
	}
}

// funcCallEvalFuncExpr evaluates expr.Fun and returns the function that we're trying to call.
// If allowCalls is false, function calls will be disabled even if scope.callCtx != nil.
func funcCallEvalFuncExpr(scope *EvalScope, fncall *functionCallState, allowCalls bool) error {
	bi := scope.BinInfo

	if !allowCalls {
		callCtx := scope.callCtx
		scope.callCtx = nil
		defer func() {
			scope.callCtx = callCtx
		}()
	}

	fnvar, err := scope.evalAST(fncall.expr.Fun)
	if err == errFuncCallNotAllowed {
		// we can't determine the frame size because callexpr.Fun can't be
		// evaluated without enabling function calls, just set up an argument
		// frame for the maximum possible argument size.
		fncall.argFrameSize = maxArgFrameSize
		return nil
	} else if err != nil {
		return err
	}
	if fnvar.Kind != reflect.Func {
		return fmt.Errorf("expression %q is not a function", exprToString(fncall.expr.Fun))
	}
	fnvar.loadValue(LoadConfig{false, 0, 0, 0, 0, 0})
	if fnvar.Unreadable != nil {
		return fnvar.Unreadable
	}
	if fnvar.Base == 0 {
		return errors.New("nil pointer dereference")
	}
	fncall.fn = bi.PCToFunc(uint64(fnvar.Base))
	if fncall.fn == nil {
		return fmt.Errorf("could not find DIE for function %q", exprToString(fncall.expr.Fun))
	}
	if !fncall.fn.cu.isgo {
		return errNotAGoFunction
	}
	fncall.closureAddr = fnvar.closureAddr

	fncall.argFrameSize, fncall.formalArgs, err = funcCallArgs(fncall.fn, bi, false)
	if err != nil {
		return err
	}

	argnum := len(fncall.expr.Args)

	// If the function variable has a child then that child is the method
	// receiver. However, if the method receiver is not being used (e.g.
	// func (_ X) Foo()) then it will not actually be listed as a formal
	// argument. Ensure that we are really off by 1 to add the receiver to
	// the function call.
	if len(fnvar.Children) > 0 && argnum == (len(fncall.formalArgs)-1) {
		argnum++
		fncall.receiver = &fnvar.Children[0]
		fncall.receiver.Name = exprToString(fncall.expr.Fun)
	}

	if argnum > len(fncall.formalArgs) {
		return errTooManyArguments
	}
	if argnum < len(fncall.formalArgs) {
		return errNotEnoughArguments
	}

	return nil
}

type funcCallArg struct {
	name       string
	typ        godwarf.Type
	off        int64
	dwarfEntry *godwarf.Tree // non-nil if Go 1.17+
	isret      bool
}

// funcCallEvalArgs evaluates the arguments of the function call, copying
// them into the argument frame of the function being called.
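//
// Where each argument is written depends on the ABI in use: pre-regabi formal
// arguments live at fixed offsets from the CFA of the new frame, while with
// the register ABI (Go 1.17+) each formal argument carries its own DWARF
// location entry (see funcCallCopyOneArg and funcCallArg.dwarfEntry).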
func funcCallEvalArgs(scope *EvalScope, fncall *functionCallState, formalScope *EvalScope) error {
	if scope.g == nil {
		// this should never happen
		return errNoGoroutine
	}

	if fncall.receiver != nil {
		err := funcCallCopyOneArg(scope, fncall, fncall.receiver, &fncall.formalArgs[0], formalScope)
		if err != nil {
			return err
		}
		fncall.formalArgs = fncall.formalArgs[1:]
	}

	for i := range fncall.formalArgs {
		formalArg := &fncall.formalArgs[i]

		actualArg, err := scope.evalAST(fncall.expr.Args[i])
		if err != nil {
			if _, ispanic := err.(fncallPanicErr); ispanic {
				return err
			}
			return fmt.Errorf("error evaluating %q as argument %s in function %s: %v", exprToString(fncall.expr.Args[i]), formalArg.name, fncall.fn.Name, err)
		}
		actualArg.Name = exprToString(fncall.expr.Args[i])

		err = funcCallCopyOneArg(scope, fncall, actualArg, formalArg, formalScope)
		if err != nil {
			return err
		}
	}

	return nil
}

func funcCallCopyOneArg(scope *EvalScope, fncall *functionCallState, actualArg *Variable, formalArg *funcCallArg, formalScope *EvalScope) error {
	if scope.callCtx.checkEscape {
		//TODO(aarzilli): only apply the escapeCheck to leaking parameters.
		if err := escapeCheck(actualArg, formalArg.name, scope.g.stack); err != nil {
			return fmt.Errorf("cannot use %s as argument %s in function %s: %v", actualArg.Name, formalArg.name, fncall.fn.Name, err)
		}
		for _, stack := range scope.callCtx.stacks {
			if err := escapeCheck(actualArg, formalArg.name, stack); err != nil {
				return fmt.Errorf("cannot use %s as argument %s in function %s: %v", actualArg.Name, formalArg.name, fncall.fn.Name, err)
			}
		}
	}

	//TODO(aarzilli): automatic wrapping in interfaces for cases not handled
	// by convertToEface.

	var formalArgVar *Variable
	if formalArg.dwarfEntry != nil {
		var err error
		formalArgVar, err = extractVarInfoFromEntry(scope.target, formalScope.BinInfo, formalScope.image(), formalScope.Regs, formalScope.Mem, formalArg.dwarfEntry, 0)
		if err != nil {
			return err
		}
	} else {
		formalArgVar = newVariable(formalArg.name, uint64(formalArg.off+int64(formalScope.Regs.CFA)), formalArg.typ, scope.BinInfo, scope.Mem)
	}
	if err := scope.setValue(formalArgVar, actualArg, actualArg.Name); err != nil {
		return err
	}

	return nil
}

func funcCallArgs(fn *Function, bi *BinaryInfo, includeRet bool) (argFrameSize int64, formalArgs []funcCallArg, err error) {
	dwarfTree, err := fn.cu.image.getDwarfTree(fn.offset)
	if err != nil {
		return 0, nil, fmt.Errorf("DWARF read error: %v", err)
	}

	producer := bi.Producer()
	trustArgOrder := producer != "" && goversion.ProducerAfterOrEqual(bi.Producer(), 1, 12)

	if bi.regabi && fn.cu.optimized && fn.Name != "runtime.mallocgc" {
		// Debug info for function arguments on optimized functions is currently
		// too incomplete to attempt injecting calls to arbitrary optimized
		// functions.
		// Prior to regabi we could do this because the ABI was simple enough to
		// manually encode it in Delve.
		// runtime.mallocgc is an exception, we specifically patch its DIE to be
		// correct for call injection purposes.
		return 0, nil, fmt.Errorf("can not call optimized function %s when regabi is in use", fn.Name)
	}

	varEntries := reader.Variables(dwarfTree, fn.Entry, int(^uint(0)>>1), reader.VariablesSkipInlinedSubroutines)

	// typechecks arguments, calculates argument frame size
	for _, entry := range varEntries {
		if entry.Tag != dwarf.TagFormalParameter {
			continue
		}
		argname, typ, err := readVarEntry(entry.Tree, fn.cu.image)
		if err != nil {
			return 0, nil, err
		}
		typ = resolveTypedef(typ)

		var formalArg *funcCallArg
		if bi.regabi {
			formalArg, err = funcCallArgRegABI(fn, bi, entry, argname, typ, &argFrameSize)
		} else {
			formalArg, err = funcCallArgOldABI(fn, bi, entry, argname, typ, trustArgOrder, &argFrameSize)
		}
		if err != nil {
			return 0, nil, err
		}
		if !formalArg.isret || includeRet {
			formalArgs = append(formalArgs, *formalArg)
		}
	}

	if bi.regabi {
		// The argument frame size is computed conservatively, assuming that
		// there's space for each argument on the stack even if it's passed in
		// registers. Unfortunately this isn't quite enough because the register
		// assignment algorithm Go uses can result in an amount of additional
		// space used due to alignment requirements, bounded by the number of argument registers.
		// Because we currently don't have an easy way to obtain the frame size,
		// let's be even more conservative.
		// A safe lower-bound on the size of the argument frame includes space for
		// each argument plus the total bytes of register arguments.
		// This is derived from worst-case alignment padding of up to
		// (pointer-word-bytes - 1) per argument passed in registers.
		// See: https://github.com/go-delve/delve/pull/2451#discussion_r665761531
		// TODO: Make this generic for other platforms.
		argFrameSize = alignAddr(argFrameSize, 8)
		argFrameSize += int64(bi.Arch.maxRegArgBytes)
	}

	sort.Slice(formalArgs, func(i, j int) bool {
		return formalArgs[i].off < formalArgs[j].off
	})

	return argFrameSize, formalArgs, nil
}

func funcCallArgOldABI(fn *Function, bi *BinaryInfo, entry reader.Variable, argname string, typ godwarf.Type, trustArgOrder bool, pargFrameSize *int64) (*funcCallArg, error) {
	const CFA = 0x1000
	var off int64

	locprog, _, err := bi.locationExpr(entry, dwarf.AttrLocation, fn.Entry)
	if err != nil {
		err = fmt.Errorf("could not get argument location of %s: %v", argname, err)
	} else {
		var pieces []op.Piece
		off, pieces, err = op.ExecuteStackProgram(op.DwarfRegisters{CFA: CFA, FrameBase: CFA}, locprog, bi.Arch.PtrSize(), nil)
		if err != nil {
			err = fmt.Errorf("unsupported location expression for argument %s: %v", argname, err)
		}
		if pieces != nil {
			err = fmt.Errorf("unsupported location expression for argument %s (uses DW_OP_piece)", argname)
		}
		off -= CFA
	}
	if err != nil {
		if !trustArgOrder {
			return nil, err
		}

		// With Go version 1.12 or later we can trust that the arguments appear
		// in the same order as declared, which means we can calculate their
		// address automatically.
		// With this we can call optimized functions (which sometimes do not have
		// an argument address, due to a compiler bug) as well as runtime
		// functions (which are always optimized).
		off = *pargFrameSize
		off = alignAddr(off, typ.Align())
	}

	if e := off + typ.Size(); e > *pargFrameSize {
		*pargFrameSize = e
	}

	isret, _ := entry.Val(dwarf.AttrVarParam).(bool)
	return &funcCallArg{name: argname, typ: typ, off: off, isret: isret}, nil
}

func funcCallArgRegABI(fn *Function, bi *BinaryInfo, entry reader.Variable, argname string, typ godwarf.Type, pargFrameSize *int64) (*funcCallArg, error) {
	// Conservatively calculate the full stack argument space for ABI0.
	*pargFrameSize = alignAddr(*pargFrameSize, typ.Align())
	*pargFrameSize += typ.Size()

	isret, _ := entry.Val(dwarf.AttrVarParam).(bool)
	return &funcCallArg{name: argname, typ: typ, dwarfEntry: entry.Tree, isret: isret}, nil
}

// alignAddr rounds up addr to a multiple of align. Align must be a power of 2.
func alignAddr(addr, align int64) int64 {
	return (addr + int64(align-1)) &^ int64(align-1)
}

func escapeCheck(v *Variable, name string, stack stack) error {
	switch v.Kind {
	case reflect.Ptr:
		var w *Variable
		if len(v.Children) == 1 {
			// this branch is here to support pointers constructed with typecasts from ints or the '&' operator
			w = &v.Children[0]
		} else {
			w = v.maybeDereference()
		}
		return escapeCheckPointer(w.Addr, name, stack)
	case reflect.Chan, reflect.String, reflect.Slice:
		return escapeCheckPointer(v.Base, name, stack)
	case reflect.Map:
		sv := v.clone()
		sv.RealType = resolveTypedef(&(v.RealType.(*godwarf.MapType).TypedefType))
		sv = sv.maybeDereference()
		return escapeCheckPointer(sv.Addr, name, stack)
	case reflect.Struct:
		t := v.RealType.(*godwarf.StructType)
		for _, field := range t.Field {
			fv, _ := v.toField(field)
			if err := escapeCheck(fv, fmt.Sprintf("%s.%s", name, field.Name), stack); err != nil {
				return err
			}
		}
	case reflect.Array:
		for i := int64(0); i < v.Len; i++ {
			sv, _ := v.sliceAccess(int(i))
			if err := escapeCheck(sv, fmt.Sprintf("%s[%d]", name, i), stack); err != nil {
				return err
			}
		}
	case reflect.Func:
		if err := escapeCheckPointer(v.funcvalAddr(), name, stack); err != nil {
			return err
		}
	}

	return nil
}

func escapeCheckPointer(addr uint64, name string, stack stack) error {
	if uint64(addr) >= stack.lo && uint64(addr) < stack.hi {
		return fmt.Errorf("stack object passed to escaping pointer: %s", name)
	}
	return nil
}

const (
	debugCallRegPrecheckFailed   = 8
	debugCallRegCompleteCall     = 0
	debugCallRegReadReturn       = 1
	debugCallRegReadPanic        = 2
	debugCallRegRestoreRegisters = 16
)

// funcCallStep executes one step of the function call injection protocol.
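//
// The value of the protocol register at each stop selects the action to take
// (see the debugCallReg* constants above):
//
//	 0 - complete the call: push the target function's frame and copy its arguments
//	 1 - read the return values from the stack
//	 2 - read the panic value from the stack
//	 8 - the runtime precheck failed, read the error string from the stack
//	16 - restore the registers saved at the start of the injection (final step)
//
// funcCallStep returns true when the injection protocol has terminated.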
func funcCallStep(callScope *EvalScope, fncall *functionCallState, thread Thread, protocolReg uint64, debugCallName string) bool {
	p := callScope.callCtx.p
	bi := p.BinInfo()

	regs, err := thread.Registers()
	if err != nil {
		fncall.err = err
		return true
	}

	regval := bi.Arch.RegistersToDwarfRegisters(0, regs).Uint64Val(protocolReg)

	if logflags.FnCall() {
		loc, _ := thread.Location()
		var pc uint64
		var fnname string
		if loc != nil {
			pc = loc.PC
			if loc.Fn != nil {
				fnname = loc.Fn.Name
			}
		}
		fncallLog("function call interrupt gid=%d (original) thread=%d regval=%#x (PC=%#x in %s)", callScope.g.ID, thread.ThreadID(), regval, pc, fnname)
	}

	switch regval {
	case debugCallRegPrecheckFailed: // 8
		archoff := uint64(0)
		if bi.Arch.Name == "arm64" {
			archoff = 8
		}
		// get error from top of the stack and return it to user
		errvar, err := readStackVariable(p, thread, regs, archoff, "string", loadFullValue)
		if err != nil {
			fncall.err = fmt.Errorf("could not get precheck error reason: %v", err)
			break
		}
		errvar.Name = "err"
		fncall.err = fmt.Errorf("%v", constant.StringVal(errvar.Value))

	case debugCallRegCompleteCall: // 0
		p.fncallForG[callScope.g.ID].startThreadID = 0

		// evaluate arguments of the target function, copy them into its argument frame and call the function
		if fncall.fn == nil || fncall.receiver != nil || fncall.closureAddr != 0 {
			// if we couldn't figure out which function we are calling before
			// (because the function we are calling is the return value of a call to
			// another function) now we have to figure it out by recursively
			// evaluating the function calls.
			// This also needs to be done if the function call has a receiver
			// argument or a closure address (because those addresses could be on the stack
			// and have changed position between the start of the call and now).

			err := funcCallEvalFuncExpr(callScope, fncall, true)
			if err != nil {
				fncall.err = err
				fncall.lateCallFailure = true
				break
			}
			//TODO: double check that function call size isn't too big
		}

		// instead of evaluating the arguments we start first by pushing the call
		// on the stack, this is the opposite of what would happen normally but
		// it's necessary because otherwise the GC wouldn't be able to deal with
		// the argument frame.
		if fncall.closureAddr != 0 {
			// When calling a function pointer we must set the DX register to the
			// address of the function pointer itself.
			setClosureReg(thread, fncall.closureAddr)
		}
		cfa := regs.SP()
		oldpc := regs.PC()
		var oldlr uint64
		if bi.Arch.Name == "arm64" {
			oldlr = regs.LR()
		}
		callOP(bi, thread, regs, fncall.fn.Entry)
		formalScope, err := GoroutineScope(callScope.target, thread)
		if formalScope != nil && formalScope.Regs.CFA != int64(cfa) {
			// This should never happen, checking just to avoid hard to figure out disasters.
			err = fmt.Errorf("mismatch in CFA %#x (calculated) %#x (expected)", formalScope.Regs.CFA, int64(cfa))
		}
		if err == nil {
			err = funcCallEvalArgs(callScope, fncall, formalScope)
		}

		if err != nil {
			// rolling back the call, note: this works because we called regs.Copy() above
			switch bi.Arch.Name {
			case "amd64":
				setSP(thread, cfa)
				setPC(thread, oldpc)
			case "arm64":
				setLR(thread, oldlr)
				setPC(thread, oldpc)
			default:
				panic("not implemented")
			}
			fncall.err = err
			fncall.lateCallFailure = true
			break
		}

	case debugCallRegRestoreRegisters: // 16
		// runtime requests that we restore the registers (all except pc and sp),
		// this is also the last step of the function call protocol.
		pc, sp := regs.PC(), regs.SP()
		if err := thread.RestoreRegisters(fncall.savedRegs); err != nil {
			fncall.err = fmt.Errorf("could not restore registers: %v", err)
		}
		if err := setPC(thread, pc); err != nil {
			fncall.err = fmt.Errorf("could not restore PC: %v", err)
		}
		if err := setSP(thread, sp); err != nil {
			fncall.err = fmt.Errorf("could not restore SP: %v", err)
		}
		if err := stepInstructionOut(p, thread, debugCallName, debugCallName); err != nil {
			fncall.err = fmt.Errorf("could not step out of %s: %v", debugCallName, err)
		}
		if bi.Arch.Name == "amd64" {
			// The tail of debugCallV2 corrupts the state of RFLAGS, we must restore
			// it one extra time after stepping out of it.
			// See https://github.com/go-delve/delve/issues/2985 and
			// TestCallInjectionFlagCorruption
			rflags := bi.Arch.RegistersToDwarfRegisters(0, fncall.savedRegs).Uint64Val(regnum.AMD64_Rflags)
			err := thread.SetReg(regnum.AMD64_Rflags, op.DwarfRegisterFromUint64(rflags))
			if err != nil {
				fncall.err = fmt.Errorf("could not restore RFLAGS register: %v", err)
			}
		}
		return true

	case debugCallRegReadReturn: // 1
		// read return arguments from stack
		if fncall.panicvar != nil || fncall.lateCallFailure {
			break
		}
		retScope, err := ThreadScope(p, thread)
		if err != nil {
			fncall.err = fmt.Errorf("could not get return values: %v", err)
			break
		}

		// pretend we are still inside the function we called
		fakeFunctionEntryScope(retScope, fncall.fn, int64(regs.SP()), regs.SP()-uint64(bi.Arch.PtrSize()))
		var flags localsFlags
		flags |= localsNoDeclLineCheck // if the function we are calling is an autogenerated stub then declaration lines have no meaning
		if !bi.regabi {
			flags |= localsTrustArgOrder
		}

		fncall.retvars, err = retScope.Locals(flags)
		if err != nil {
			fncall.err = fmt.Errorf("could not get return values: %v", err)
			break
		}
		fncall.retvars = filterVariables(fncall.retvars, func(v *Variable) bool {
			return (v.Flags & VariableReturnArgument) != 0
		})

		loadValues(fncall.retvars, callScope.callCtx.retLoadCfg)
		for _, v := range fncall.retvars {
			v.Flags |= VariableFakeAddress
		}

		// Store the stack span of the currently running goroutine (which in Go >=
		// 1.15 might be different from the original injection goroutine) so that
		// later on we can use it to perform the escapeCheck
		if threadg, _ := GetG(thread); threadg != nil {
			callScope.callCtx.stacks = append(callScope.callCtx.stacks, threadg.stack)
		}
		if bi.Arch.Name == "arm64" {
			oldlr, err := readUintRaw(thread.ProcessMemory(), regs.SP(), int64(bi.Arch.PtrSize()))
			if err != nil {
				fncall.err = fmt.Errorf("could not restore LR: %v", err)
				break
			}
			if err = setLR(thread, oldlr); err != nil {
				fncall.err = fmt.Errorf("could not restore LR: %v", err)
				break
			}
		}

	case debugCallRegReadPanic: // 2
		// read panic value from stack
		archoff := uint64(0)
		if bi.Arch.Name == "arm64" {
			archoff = 8
		}
		fncall.panicvar, err = readStackVariable(p, thread, regs, archoff, "interface {}", callScope.callCtx.retLoadCfg)
		if err != nil {
			fncall.err = fmt.Errorf("could not get panic: %v", err)
			break
		}
		fncall.panicvar.Name = "~panic"

	default:
		// Got an unknown protocol register value, this is probably bad but the safest thing
		// possible is to ignore it and hope it didn't matter.
		fncallLog("unknown value of protocol register %#x", regval)
	}

	return false
}

func readStackVariable(t *Target, thread Thread, regs Registers, off uint64, typename string, loadCfg LoadConfig) (*Variable, error) {
	bi := thread.BinInfo()
	scope, err := ThreadScope(t, thread)
	if err != nil {
		return nil, err
	}
	typ, err := bi.findType(typename)
	if err != nil {
		return nil, err
	}
	v := newVariable("", regs.SP()+off, typ, scope.BinInfo, scope.Mem)
	v.loadValue(loadCfg)
	if v.Unreadable != nil {
		return nil, v.Unreadable
	}
	v.Flags |= VariableFakeAddress
	return v, nil
}

// fakeFunctionEntryScope alters scope to pretend that we are at the entry point of
// fn and that CFA and SP are the ones passed as argument.
// This function is used to create a scope for a call frame that doesn't
// exist anymore, to read the return variables of an injected function call,
// or after a stepout command.
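//
// For example funcCallStep uses it to read the return values of an injected
// call, with the CFA and SP taken from the registers at the protocol stop:
//
//	fakeFunctionEntryScope(retScope, fncall.fn, int64(regs.SP()), regs.SP()-uint64(bi.Arch.PtrSize()))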
func fakeFunctionEntryScope(scope *EvalScope, fn *Function, cfa int64, sp uint64) error {
	scope.PC = fn.Entry
	scope.Fn = fn
	scope.File, scope.Line, _ = scope.BinInfo.PCToLine(fn.Entry)

	scope.Regs.CFA = cfa
	scope.Regs.Reg(scope.Regs.SPRegNum).Uint64Val = sp
	scope.Regs.Reg(scope.Regs.PCRegNum).Uint64Val = fn.Entry

	fn.cu.image.dwarfReader.Seek(fn.offset)
	e, err := fn.cu.image.dwarfReader.Next()
	if err != nil {
		return err
	}
	scope.Regs.FrameBase, _, _, _ = scope.BinInfo.Location(e, dwarf.AttrFrameBase, scope.PC, scope.Regs, nil)
	return nil
}

// allocString allocates space for the contents of v if it needs to be allocated
func allocString(scope *EvalScope, v *Variable) error {
	if v.Base != 0 || v.Len == 0 {
		// already allocated
		return nil
	}

	if scope.callCtx == nil {
		return errFuncCallNotAllowedStrAlloc
	}
	savedLoadCfg := scope.callCtx.retLoadCfg
	scope.callCtx.retLoadCfg = loadFullValue
	defer func() {
		scope.callCtx.retLoadCfg = savedLoadCfg
	}()
	mallocv, err := evalFunctionCall(scope, &ast.CallExpr{
		Fun: &ast.SelectorExpr{
			X:   &ast.Ident{Name: "runtime"},
			Sel: &ast.Ident{Name: "mallocgc"},
		},
		Args: []ast.Expr{
			&ast.BasicLit{Kind: token.INT, Value: strconv.Itoa(int(v.Len))},
			&ast.Ident{Name: "nil"},
			&ast.Ident{Name: "false"},
		},
	})
	if err != nil {
		return err
	}
	if mallocv.Unreadable != nil {
		return mallocv.Unreadable
	}
	if mallocv.DwarfType.String() != "*void" {
		return fmt.Errorf("unexpected return type for mallocgc call: %v", mallocv.DwarfType.String())
	}
	if len(mallocv.Children) != 1 {
		return errors.New("internal error, could not interpret return value of mallocgc call")
	}
	v.Base = mallocv.Children[0].Addr
	_, err = scope.Mem.WriteMemory(v.Base, []byte(constant.StringVal(v.Value)))
	return err
}

func isCallInjectionStop(t *Target, thread Thread, loc *Location) bool {
	if loc.Fn == nil {
		return false
	}
	if !strings.HasPrefix(loc.Fn.Name, debugCallFunctionNamePrefix1) && !strings.HasPrefix(loc.Fn.Name, debugCallFunctionNamePrefix2) {
		return false
	}
	if loc.PC == loc.Fn.Entry {
		// call injection just started, did not make any progress before being interrupted by a concurrent breakpoint.
		return false
	}
	off := int64(0)
	if thread.BinInfo().Arch.breakInstrMovesPC {
		off = -int64(len(thread.BinInfo().Arch.breakpointInstruction))
	}
	text, err := disassembleCurrentInstruction(t, thread, off)
	if err != nil || len(text) <= 0 {
		return false
	}
	return text[0].IsHardBreak()
}

// callInjectionProtocol is the function called from Continue to progress
// the injection protocol for all threads.
// Returns true if a call injection terminated.
func callInjectionProtocol(t *Target, threads []Thread) (done bool, err error) {
	if len(t.fncallForG) == 0 {
		// we aren't injecting any calls, no need to check the threads.
		return false, nil
	}
	for _, thread := range threads {
		loc, err := thread.Location()
		if err != nil {
			continue
		}
		if !isCallInjectionStop(t, thread, loc) {
			continue
		}

		g, callinj, err := findCallInjectionStateForThread(t, thread)
		if err != nil {
			return false, err
		}

		arch := thread.BinInfo().Arch
		if !arch.breakInstrMovesPC {
			setPC(thread, loc.PC+uint64(len(arch.breakpointInstruction)))
		}

		fncallLog("step for injection on goroutine %d (current) thread=%d (location %s)", g.ID, thread.ThreadID(), loc.Fn.Name)
		callinj.continueCompleted <- g
		contReq, ok := <-callinj.continueRequest
		if !contReq.cont {
			err := finishEvalExpressionWithCalls(t, g, contReq, ok)
			if err != nil {
				return done, err
			}
			done = true
		}
	}
	return done, nil
}

func findCallInjectionStateForThread(t *Target, thread Thread) (*G, *callInjection, error) {
	g, err := GetG(thread)
	if err != nil {
		return nil, nil, fmt.Errorf("could not determine running goroutine for thread %#x currently executing the function call injection protocol: %v", thread.ThreadID(), err)
	}
	fncallLog("findCallInjectionStateForThread thread=%d goroutine=%d", thread.ThreadID(), g.ID)
	notfound := func() error {
		return fmt.Errorf("could not recover call injection state for goroutine %d (thread %d)", g.ID, thread.ThreadID())
	}
	callinj := t.fncallForG[g.ID]
	if callinj != nil {
		if callinj.continueCompleted == nil {
			return nil, nil, notfound()
		}
		return g, callinj, nil
	}

	// In Go 1.15 and later the call injection protocol will switch to a
	// different goroutine.
	// Here we try to recover the injection goroutine by checking the injection
	// thread.

	for goid, callinj := range t.fncallForG {
		if callinj != nil && callinj.continueCompleted != nil && callinj.startThreadID != 0 && callinj.startThreadID == thread.ThreadID() {
			t.fncallForG[g.ID] = callinj
			fncallLog("goroutine %d is the goroutine executing the call injection started in goroutine %d", g.ID, goid)
			return g, callinj, nil
		}
	}

	return nil, nil, notfound()
}

// debugCallFunction searches for the debug call function in the binary and
// uses this search to detect the debug call version.
// Returns the debug call function and its version as an integer (the lowest
// valid version is 1) or nil and zero.
func debugCallFunction(bi *BinaryInfo) (*Function, int) {
	for version := maxDebugCallVersion; version >= 1; version-- {
		name := debugCallFunctionNamePrefix2 + "V" + strconv.Itoa(version)
		fn, ok := bi.LookupFunc[name]
		if ok && fn != nil {
			return fn, version
		}
	}
	return nil, 0
}

// debugCallProtocolReg returns the register ID (as defined in pkg/dwarf/regnum)
// of the register used in the debug call protocol, given the debug call version.
// Also returns a bool indicating whether the version is supported.
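//
// For example debugCallProtocolReg("amd64", 2) returns regnum.AMD64_R12;
// version 1 of the protocol used RAX. On arm64 only version 2 is supported
// and the protocol register is X20 (regnum.ARM64_X0+20).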
func debugCallProtocolReg(archName string, version int) (uint64, bool) {
	switch archName {
	case "amd64":
		var protocolReg uint64
		switch version {
		case 1:
			protocolReg = regnum.AMD64_Rax
		case 2:
			protocolReg = regnum.AMD64_R12
		default:
			return 0, false
		}
		return protocolReg, true
	case "arm64":
		if version == 2 {
			return regnum.ARM64_X0 + 20, true
		}
		return 0, false
	default:
		return 0, false
	}
}

type fakeEntry map[dwarf.Attr]interface{}

func (e fakeEntry) Val(attr dwarf.Attr) interface{} {
	return e[attr]
}

func regabiMallocgcWorkaround(bi *BinaryInfo) ([]*godwarf.Tree, error) {
	var err1 error

	t := func(name string) godwarf.Type {
		if err1 != nil {
			return nil
		}
		typ, err := bi.findType(name)
		if err != nil {
			err1 = err
			return nil
		}
		return typ
	}

	m := func(name string, typ godwarf.Type, reg int, isret bool) *godwarf.Tree {
		if err1 != nil {
			return nil
		}
		var e fakeEntry = map[dwarf.Attr]interface{}{
			dwarf.AttrName:     name,
			dwarf.AttrType:     typ.Common().Offset,
			dwarf.AttrLocation: []byte{byte(op.DW_OP_reg0) + byte(reg)},
			dwarf.AttrVarParam: isret,
		}

		return &godwarf.Tree{
			Entry: e,
			Tag:   dwarf.TagFormalParameter,
		}
	}

	switch bi.Arch.Name {
	case "amd64":
		r := []*godwarf.Tree{
			m("size", t("uintptr"), regnum.AMD64_Rax, false),
			m("typ", t("*runtime._type"), regnum.AMD64_Rbx, false),
			m("needzero", t("bool"), regnum.AMD64_Rcx, false),
			m("~r1", t("unsafe.Pointer"), regnum.AMD64_Rax, true),
		}
		return r, err1
	case "arm64":
		r := []*godwarf.Tree{
			m("size", t("uintptr"), regnum.ARM64_X0, false),
			m("typ", t("*runtime._type"), regnum.ARM64_X0+1, false),
			m("needzero", t("bool"), regnum.ARM64_X0+2, false),
			m("~r1", t("unsafe.Pointer"), regnum.ARM64_X0, true),
		}
		return r, err1
	default:
		// do nothing
		return nil, nil
	}
}