github.com/zxy12/go_duplicate_112_new@v0.0.0-20200807091221-747231827200/src/cmd/compile/internal/gc/pgen.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/dwarf"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
	"fmt"
	"math/rand"
	"sort"
	"sync"
	"time"
)

// "Portable" code generation.

var (
	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
	compilequeue    []*Node // functions waiting to be compiled
)

func emitptrargsmap(fn *Node) {
	if fn.funcname() == "_" {
		return
	}
	sym := lookup(fmt.Sprintf("%s.args_stackmap", fn.funcname()))
	lsym := sym.Linksym()

	nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
	bv := bvalloc(int32(nptr) * 2)
	nbitmap := 1
	if fn.Type.NumResults() > 0 {
		nbitmap = 2
	}
	off := duint32(lsym, 0, uint32(nbitmap))
	off = duint32(lsym, off, uint32(bv.n))

	if fn.IsMethod() {
		onebitwalktype1(fn.Type.Recvs(), 0, bv)
	}
	if fn.Type.NumParams() > 0 {
		onebitwalktype1(fn.Type.Params(), 0, bv)
	}
	off = dbvec(lsym, off, bv)

	if fn.Type.NumResults() > 0 {
		onebitwalktype1(fn.Type.Results(), 0, bv)
		off = dbvec(lsym, off, bv)
	}

	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
}

// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// Sort the list of stack variables. Autos after anything else,
// within autos, unused after used, within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out in decreasing addresses
// on the stack, pointers first, zeroed things first and decreasing size
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
func cmpstackvarlt(a, b *Node) bool {
	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
		return b.Class() == PAUTO
	}

	if a.Class() != PAUTO {
		return a.Xoffset < b.Xoffset
	}

	if a.Name.Used() != b.Name.Used() {
		return a.Name.Used()
	}

	ap := types.Haspointers(a.Type)
	bp := types.Haspointers(b.Type)
	if ap != bp {
		return ap
	}

	ap = a.Name.Needzero()
	bp = b.Name.Needzero()
	if ap != bp {
		return ap
	}

	if a.Type.Width != b.Type.Width {
		return a.Type.Width > b.Type.Width
	}

	return a.Sym.Name < b.Sym.Name
}

// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*Node

func (s byStackVar) Len() int           { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
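// cmpstackvarlt above is a chain of tie-breakers: each key is consulted
// only when every earlier key compares equal, ending with the symbol name
// so the order is fully deterministic. The sketch below shows the same
// pattern on a self-contained slice type (illustrative only, not part of
// the compiler; exampleSlot, exampleSlotLess, and exampleSortSlots are
// hypothetical names).
type exampleSlot struct {
	isAuto bool
	used   bool
	width  int64
	name   string
}

// exampleSlotLess mirrors the cmpstackvarlt structure: compare the most
// significant key first and fall through to the next key on ties.
func exampleSlotLess(a, b exampleSlot) bool {
	if a.isAuto != b.isAuto {
		return b.isAuto // non-autos sort before autos
	}
	if a.used != b.used {
		return a.used // used slots before unused ones
	}
	if a.width != b.width {
		return a.width > b.width // larger slots first
	}
	return a.name < b.name // name as the final, deterministic tie-breaker
}

// exampleSortSlots applies the tie-breaker chain through the standard
// sort package, just as byStackVar feeds cmpstackvarlt to sort.Sort.
func exampleSortSlots(slots []exampleSlot) {
	sort.Slice(slots, func(i, j int) bool { return exampleSlotLess(slots[i], slots[j]) })
}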
func (s *ssafn) AllocFrame(f *ssa.Func) {
	s.stksize = 0
	s.stkptrsize = 0
	fn := s.curfn.Func

	// Mark the PAUTOs unused.
	for _, ln := range fn.Dcl {
		if ln.Class() == PAUTO {
			ln.Name.SetUsed(false)
		}
	}

	for _, l := range f.RegAlloc {
		if ls, ok := l.(ssa.LocalSlot); ok {
			ls.N.(*Node).Name.SetUsed(true)
		}
	}

	scratchUsed := false
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if n, ok := v.Aux.(*Node); ok {
				switch n.Class() {
				case PPARAM, PPARAMOUT:
					// Don't modify nodfp; it is a global.
					if n != nodfp {
						n.Name.SetUsed(true)
					}
				case PAUTO:
					n.Name.SetUsed(true)
				}
			}
			if !scratchUsed {
				scratchUsed = v.Op.UsesScratch()
			}
		}
	}

	if f.Config.NeedsFpScratch && scratchUsed {
		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
	}

	sort.Sort(byStackVar(fn.Dcl))

	// Reassign stack offsets of the locals that are used.
	lastHasPtr := false
	for i, n := range fn.Dcl {
		if n.Op != ONAME || n.Class() != PAUTO {
			continue
		}
		if !n.Name.Used() {
			fn.Dcl = fn.Dcl[:i]
			break
		}

		dowidth(n.Type)
		w := n.Type.Width
		if w >= thearch.MAXWIDTH || w < 0 {
			Fatalf("bad width")
		}
		if w == 0 && lastHasPtr {
			// Pad between a pointer-containing object and a zero-sized object.
			// This prevents a pointer to the zero-sized object from being interpreted
			// as a pointer to the pointer-containing object (and causing it
			// to be scanned when it shouldn't be). See issue 24993.
			w = 1
		}
		s.stksize += w
		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
		if types.Haspointers(n.Type) {
			s.stkptrsize = s.stksize
			lastHasPtr = true
		} else {
			lastHasPtr = false
		}
		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
			s.stksize = Rnd(s.stksize, int64(Widthptr))
		}
		n.Xoffset = -s.stksize
	}

	s.stksize = Rnd(s.stksize, int64(Widthreg))
	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
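// AllocFrame aligns each slot by rounding the running frame size up to
// the slot's alignment before assigning its (negative) offset. A minimal
// sketch of that round-up step (illustrative only; exampleRnd is a
// hypothetical stand-in for the compiler's Rnd helper, and assumes align
// is a power of two, as type alignments are):
func exampleRnd(size, align int64) int64 {
	// Add align-1, then clear the low bits: e.g. exampleRnd(13, 8) == 16.
	return (size + align - 1) &^ (align - 1)
}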
func funccompile(fn *Node) {
	if Curfn != nil {
		Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
	}

	if fn.Type == nil {
		if nerrors == 0 {
			Fatalf("funccompile missing type")
		}
		return
	}

	// assign parameter offsets
	dowidth(fn.Type)

	if fn.Nbody.Len() == 0 {
		// Initialize ABI wrappers if necessary.
		fn.Func.initLSym(false)
		emitptrargsmap(fn)
		return
	}

	dclcontext = PAUTO
	Curfn = fn

	compile(fn)

	Curfn = nil
	dclcontext = PEXTERN
}

func compile(fn *Node) {
	saveerrors()

	order(fn)
	if nerrors != 0 {
		return
	}

	walk(fn)
	if nerrors != 0 {
		return
	}
	if instrumenting {
		instrument(fn)
	}

	// From this point, there should be no uses of Curfn. Enforce that.
	Curfn = nil

	if fn.funcname() == "_" {
		// We don't need to generate code for this function, just report errors in its body.
		// At this point we've generated any errors needed.
		// (Beyond here we generate only non-spec errors, like "stack frame too large".)
		// See issue 29870.
		return
	}

	// Set up the function's LSym early to avoid data races with the assemblers.
	fn.Func.initLSym(true)

	// Make sure type syms are declared for all types that might
	// be types of stack objects. We need to do this here
	// because symbols must be allocated before the parallel
	// phase of the compiler.
	if fn.Func.lsym != nil { // not func _(){}
		for _, n := range fn.Func.Dcl {
			switch n.Class() {
			case PPARAM, PPARAMOUT, PAUTO:
				if livenessShouldTrack(n) && n.Addrtaken() {
					dtypesym(n.Type)
					// Also make sure we allocate a linker symbol
					// for the stack object data, for the same reason.
					if fn.Func.lsym.Func.StackObjects == nil {
						fn.Func.lsym.Func.StackObjects = Ctxt.Lookup(fn.Func.lsym.Name + ".stkobj")
					}
				}
			}
		}
	}

	if compilenow() {
		compileSSA(fn, 0)
	} else {
		compilequeue = append(compilequeue, fn)
	}
}

// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow() bool {
	return nBackendWorkers == 1 && Debug_compilelater == 0
}

const maxStackSize = 1 << 30

// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
	f := buildssa(fn, worker)
	// Note: check arg size to fix issue 25507.
	if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
		largeStackFramesMu.Lock()
		largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
		largeStackFramesMu.Unlock()
		return
	}
	pp := newProgs(fn, worker)
	defer pp.Free()
	genssa(f, pp)
	// Check frame size again.
	// The check above included only the space needed for local variables.
	// After genssa, the space needed includes local variables and the callee arg region.
	// We must do this check prior to calling pp.Flush.
	// If there are any oversized stack frames,
	// the assembler may emit inscrutable complaints about invalid instructions.
	if pp.Text.To.Offset >= maxStackSize {
		largeStackFramesMu.Lock()
		locals := f.Frontend().(*ssafn).stksize
		largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
		largeStackFramesMu.Unlock()
		return
	}

	pp.Flush() // assemble, fill in boilerplate, etc.
	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
}

func init() {
	if raceEnabled {
		rand.Seed(time.Now().UnixNano())
	}
}
// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
	if len(compilequeue) != 0 {
		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
		if raceEnabled {
			// Randomize compilation order to try to shake out races.
			tmp := make([]*Node, len(compilequeue))
			perm := rand.Perm(len(compilequeue))
			for i, v := range perm {
				tmp[v] = compilequeue[i]
			}
			copy(compilequeue, tmp)
		} else {
			// Compile the longest functions first,
			// since they're most likely to be the slowest.
			// This helps avoid stragglers.
			obj.SortSlice(compilequeue, func(i, j int) bool {
				return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
			})
		}
		var wg sync.WaitGroup
		Ctxt.InParallel = true
		c := make(chan *Node, nBackendWorkers)
		for i := 0; i < nBackendWorkers; i++ {
			wg.Add(1)
			go func(worker int) {
				for fn := range c {
					compileSSA(fn, worker)
				}
				wg.Done()
			}(i)
		}
		for _, fn := range compilequeue {
			c <- fn
		}
		close(c)
		compilequeue = nil
		wg.Wait()
		Ctxt.InParallel = false
		sizeCalculationDisabled = false
	}
}
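// compileFunctions above is a standard bounded worker pool: a buffered
// channel feeds a fixed number of goroutines, the sender closes the
// channel once the queue is drained, and a WaitGroup blocks until every
// worker exits. The sketch below shows the same pattern in isolation
// (illustrative only; exampleFanOut and its int work items are
// hypothetical, not part of the compiler).
func exampleFanOut(items []int, workers int, process func(item, worker int)) {
	var wg sync.WaitGroup
	c := make(chan int, workers)
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(worker int) {
			defer wg.Done()
			for item := range c { // exits when c is closed and drained
				process(item, worker)
			}
		}(i)
	}
	for _, item := range items {
		c <- item
	}
	close(c) // lets the workers' range loops terminate
	wg.Wait()
}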
func debuginfo(fnsym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
	fn := curfn.(*Node)
	if fn.Func.Nname != nil {
		if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
			Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
		}
	}

	var automDecls []*Node
	// Populate Automs for fn.
	for _, n := range fn.Func.Dcl {
		if n.Op != ONAME { // might be OTYPE or OLITERAL
			continue
		}
		var name obj.AddrName
		switch n.Class() {
		case PAUTO:
			if !n.Name.Used() {
				// Text == nil -> generating abstract function
				if fnsym.Func.Text != nil {
					Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
				}
				continue
			}
			name = obj.NAME_AUTO
		case PPARAM, PPARAMOUT:
			name = obj.NAME_PARAM
		default:
			continue
		}
		automDecls = append(automDecls, n)
		gotype := ngotype(n).Linksym()
		fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{
			Asym:    Ctxt.Lookup(n.Sym.Name),
			Aoffset: int32(n.Xoffset),
			Name:    name,
			Gotype:  gotype,
		})
	}

	decls, dwarfVars := createDwarfVars(fnsym, fn.Func, automDecls)

	var varScopes []ScopeID
	for _, decl := range decls {
		pos := decl.Pos
		if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
			// It's not clear which position is correct for captured variables here:
			// * decl.Pos is the wrong position for captured variables, in the inner
			//   function, but it is the right position in the outer function.
			// * decl.Name.Defn is nil for captured variables that were arguments
			//   on the outer function, however the decl.Pos for those seems to be
			//   correct.
			// * decl.Name.Defn is the "wrong" thing for variables declared in the
			//   header of a type switch, it's their position in the header, rather
			//   than the position of the case statement. In principle this is the
			//   right thing, but here we prefer the latter because it makes each
			//   instance of the header variable local to the lexical block of its
			//   case statement.
			// This code is probably wrong for type switch variables that are also
			// captured.
			pos = decl.Name.Defn.Pos
		}
		varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
	}

	scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
	var inlcalls dwarf.InlCalls
	if genDwarfInline > 0 {
		inlcalls = assembleInlines(fnsym, dwarfVars)
	}
	return scopes, inlcalls
}

// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
	var vars []*dwarf.Var
	var decls []*Node
	selected := make(map[*Node]bool)
	for _, n := range automDecls {
		if n.IsAutoTmp() {
			continue
		}

		decls = append(decls, n)
		vars = append(vars, createSimpleVar(n))
		selected[n] = true
	}
	return decls, vars, selected
}

func createSimpleVar(n *Node) *dwarf.Var {
	var abbrev int
	offs := n.Xoffset

	switch n.Class() {
	case PAUTO:
		abbrev = dwarf.DW_ABRV_AUTO
		if Ctxt.FixedFrameSize() == 0 {
			offs -= int64(Widthptr)
		}
		if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" {
			// There is a word space for FP on ARM64 even if the frame pointer is disabled
			offs -= int64(Widthptr)
		}

	case PPARAM, PPARAMOUT:
		abbrev = dwarf.DW_ABRV_PARAM
		offs += Ctxt.FixedFrameSize()
	default:
		Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
	}

	typename := dwarf.InfoPrefix + typesymname(n.Type)
	inlIndex := 0
	if genDwarfInline > 1 {
		if n.InlFormal() || n.InlLocal() {
			inlIndex = posInlIndex(n.Pos) + 1
			if n.InlFormal() {
				abbrev = dwarf.DW_ABRV_PARAM
			}
		}
	}
	declpos := Ctxt.InnermostPos(n.Pos)
	return &dwarf.Var{
		Name:          n.Sym.Name,
		IsReturnValue: n.Class() == PPARAMOUT,
		IsInlFormal:   n.InlFormal(),
		Abbrev:        abbrev,
		StackOffset:   int32(offs),
		Type:          Ctxt.Lookup(typename),
		DeclFile:      declpos.RelFilename(),
		DeclLine:      declpos.RelLine(),
		DeclCol:       declpos.Col(),
		InlIndex:      int32(inlIndex),
		ChildIndex:    -1,
	}
}
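// createSimpleVar above (and stackOffset later in this file) both apply
// the same adjustment for autos: the compiler's frame-relative Xoffset is
// translated to the stack-pointer-relative offset DWARF expects by
// reserving a word for the pushed return address on architectures where
// FixedFrameSize is zero, and another word where a frame-pointer slot
// exists. A hedged, self-contained sketch of that arithmetic
// (exampleDwarfAutoOffset is hypothetical; assumes an 8-byte word, as on
// a 64-bit target):
func exampleDwarfAutoOffset(xoffset, fixedFrameSize int64, fpReserved bool) int64 {
	const wordSize = 8 // assumption: 64-bit pointer width
	offs := xoffset
	if fixedFrameSize == 0 {
		offs -= wordSize // account for the return address pushed by CALL
	}
	if fpReserved {
		offs -= wordSize // account for the saved frame-pointer slot
	}
	return offs
}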
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
	debugInfo := fn.DebugInfo

	// Produce a DWARF variable entry for each user variable.
	var decls []*Node
	var vars []*dwarf.Var
	ssaVars := make(map[*Node]bool)

	for varID, dvar := range debugInfo.Vars {
		n := dvar.(*Node)
		ssaVars[n] = true
		for _, slot := range debugInfo.VarSlots[varID] {
			ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
		}

		if dvar := createComplexVar(fn, ssa.VarID(varID)); dvar != nil {
			decls = append(decls, n)
			vars = append(vars, dvar)
		}
	}

	return decls, vars, ssaVars
}

// createDwarfVars processes fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, fn *Func, automDecls []*Node) ([]*Node, []*dwarf.Var) {
	// Collect a raw list of DWARF vars.
	var vars []*dwarf.Var
	var decls []*Node
	var selected map[*Node]bool
	if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
		decls, vars, selected = createComplexVars(fn)
	} else {
		decls, vars, selected = createSimpleVars(automDecls)
	}

	var dcl []*Node
	if fnsym.WasInlined() {
		dcl = preInliningDcls(fnsym)
	} else {
		dcl = automDecls
	}

	// If optimization is enabled, the list above will typically be
	// missing some of the original pre-optimization variables in the
	// function (they may have been promoted to registers, folded into
	// constants, dead-code eliminated, etc). Input arguments not eligible
	// for SSA optimization are also missing. Here we add back in entries
	// for selected missing vars. Note that the recipe below creates a
	// conservative location. The idea here is that we want to
	// communicate to the user that "yes, there is a variable named X
	// in this function, but no, I don't have enough information to
	// reliably report its contents."
	// For non-SSA-able arguments, however, the correct information
	// is known -- they have a single home on the stack.
	for _, n := range dcl {
		if _, found := selected[n]; found {
			continue
		}
		c := n.Sym.Name[0]
		if c == '.' || n.Type.IsUntyped() {
			continue
		}
		if n.Class() == PPARAM && !canSSAType(n.Type) {
			// SSA-able args get location lists, and may move in and
			// out of registers, so those are handled elsewhere.
			// Autos and named output params seem to get handled
			// with VARDEF, which creates location lists.
			// Args not of SSA-able type are treated here; they
			// are homed on the stack in a single place for the
			// entire call.
			vars = append(vars, createSimpleVar(n))
			decls = append(decls, n)
			continue
		}
		typename := dwarf.InfoPrefix + typesymname(n.Type)
		decls = append(decls, n)
		abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
		isReturnValue := (n.Class() == PPARAMOUT)
		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
			abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
		} else if n.Class() == PAUTOHEAP {
			// If the decl in question has been promoted to the heap, do a
			// bit of extra work to recover the original class (auto or
			// param); see issue 30908. This ensures that we get the proper
			// signature in the abstract function DIE, but leaves a
			// misleading location for the param (we want pointer-to-heap
			// and not stack).
			// TODO(thanm): generate a better location expression
			stackcopy := n.Name.Param.Stackcopy
			if stackcopy != nil && (stackcopy.Class() == PPARAM || stackcopy.Class() == PPARAMOUT) {
				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
				isReturnValue = (stackcopy.Class() == PPARAMOUT)
			}
		}
		inlIndex := 0
		if genDwarfInline > 1 {
			if n.InlFormal() || n.InlLocal() {
				inlIndex = posInlIndex(n.Pos) + 1
				if n.InlFormal() {
					abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
				}
			}
		}
		declpos := Ctxt.InnermostPos(n.Pos)
		vars = append(vars, &dwarf.Var{
			Name:          n.Sym.Name,
			IsReturnValue: isReturnValue,
			Abbrev:        abbrev,
			StackOffset:   int32(n.Xoffset),
			Type:          Ctxt.Lookup(typename),
			DeclFile:      declpos.RelFilename(),
			DeclLine:      declpos.RelLine(),
			DeclCol:       declpos.Col(),
			InlIndex:      int32(inlIndex),
			ChildIndex:    -1,
		})
		// Append a "deleted auto" entry to the autom list so as to
		// ensure that the type in question is picked up by the linker.
		// See issue 22941.
		gotype := ngotype(n).Linksym()
		fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{
			Asym:    Ctxt.Lookup(n.Sym.Name),
			Aoffset: int32(-1),
			Name:    obj.NAME_DELETED_AUTO,
			Gotype:  gotype,
		})
	}

	return decls, vars
}
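// The add-back loop in createDwarfVars is a set-difference merge: walk
// the pre-optimization declaration list and append anything the selected
// map does not already cover. The same idiom in isolation (illustrative
// only; exampleMergeMissing is hypothetical, not part of the compiler):
func exampleMergeMissing(all, present []string) []string {
	seen := make(map[string]bool, len(present))
	for _, name := range present {
		seen[name] = true
	}
	merged := append([]string(nil), present...)
	for _, name := range all {
		if !seen[name] { // add back anything the first pass dropped
			merged = append(merged, name)
		}
	}
	return merged
}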
// Given a function that was inlined at some point during the
// compilation, return a sorted list of nodes corresponding to the
// autos/locals in that function prior to inlining. If this is a
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []*Node {
	fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node)
	var rdcl []*Node
	for _, n := range fn.Func.Inl.Dcl {
		c := n.Sym.Name[0]
		// Avoid reporting "_" parameters, since if there are more than
		// one, it can result in a collision later on, as in #23179.
		if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
			continue
		}
		rdcl = append(rdcl, n)
	}
	return rdcl
}

// stackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func stackOffset(slot ssa.LocalSlot) int32 {
	n := slot.N.(*Node)
	var base int64
	switch n.Class() {
	case PAUTO:
		if Ctxt.FixedFrameSize() == 0 {
			base -= int64(Widthptr)
		}
		if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" {
			// There is a word space for FP on ARM64 even if the frame pointer is disabled
			base -= int64(Widthptr)
		}
	case PPARAM, PPARAMOUT:
		base += Ctxt.FixedFrameSize()
	}
	return int32(base + n.Xoffset + slot.Off)
}

// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fn *Func, varID ssa.VarID) *dwarf.Var {
	debug := fn.DebugInfo
	n := debug.Vars[varID].(*Node)

	var abbrev int
	switch n.Class() {
	case PAUTO:
		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
	case PPARAM, PPARAMOUT:
		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
	default:
		return nil
	}

	gotype := ngotype(n).Linksym()
	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
	inlIndex := 0
	if genDwarfInline > 1 {
		if n.InlFormal() || n.InlLocal() {
			inlIndex = posInlIndex(n.Pos) + 1
			if n.InlFormal() {
				abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
			}
		}
	}
	declpos := Ctxt.InnermostPos(n.Pos)
	dvar := &dwarf.Var{
		Name:          n.Sym.Name,
		IsReturnValue: n.Class() == PPARAMOUT,
		IsInlFormal:   n.InlFormal(),
		Abbrev:        abbrev,
		Type:          Ctxt.Lookup(typename),
		// The stack offset is used as a sorting key, so for decomposed
		// variables just give it the first one. It's not used otherwise.
		// This won't work well if the first slot hasn't been assigned a stack
		// location, but it's not obvious how to do better.
		StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
		DeclFile:    declpos.RelFilename(),
		DeclLine:    declpos.RelLine(),
		DeclCol:     declpos.Col(),
		InlIndex:    int32(inlIndex),
		ChildIndex:  -1,
	}
	list := debug.LocationLists[varID]
	if len(list) != 0 {
		dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
			debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
		}
	}
	return dvar
}
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
	if fnsym == nil {
		return
	}
	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
		return
	}

	trackSyms := make([]*types.Sym, 0, len(tracked))
	for sym := range tracked {
		trackSyms = append(trackSyms, sym)
	}
	sort.Sort(symByName(trackSyms))
	for _, sym := range trackSyms {
		r := obj.Addrel(fnsym)
		r.Sym = sym.Linksym()
		r.Type = objabi.R_USEFIELD
	}
}

type symByName []*types.Sym

func (a symByName) Len() int           { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
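// fieldtrack sorts the tracked symbols before emitting relocations
// because Go map iteration order is randomized; without the sort, the
// object file contents would differ from build to build. The sketch
// below isolates that collect-then-sort idiom (illustrative only;
// exampleSortedKeys is hypothetical, not part of the compiler).
func exampleSortedKeys(set map[string]struct{}) []string {
	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order for reproducible output
	return keys
}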