github.com/Filosottile/go@v0.0.0-20170906193555-dbed9972d994/src/cmd/compile/internal/gc/pgen.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/ssa"
	"cmd/compile/internal/types"
	"cmd/internal/dwarf"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"cmd/internal/sys"
	"fmt"
	"math"
	"math/rand"
	"sort"
	"sync"
	"time"
)

// "Portable" code generation.

var (
	nBackendWorkers int     // number of concurrent backend workers, set by a compiler flag
	compilequeue    []*Node // functions waiting to be compiled
)

func emitptrargsmap() {
	if Curfn.funcname() == "_" {
		return
	}
	sym := lookup(fmt.Sprintf("%s.args_stackmap", Curfn.funcname()))
	lsym := sym.Linksym()

	nptr := int(Curfn.Type.ArgWidth() / int64(Widthptr))
	bv := bvalloc(int32(nptr) * 2)
	nbitmap := 1
	if Curfn.Type.NumResults() > 0 {
		nbitmap = 2
	}
	off := duint32(lsym, 0, uint32(nbitmap))
	off = duint32(lsym, off, uint32(bv.n))
	var xoffset int64
	if Curfn.IsMethod() {
		xoffset = 0
		onebitwalktype1(Curfn.Type.Recvs(), &xoffset, bv)
	}

	if Curfn.Type.NumParams() > 0 {
		xoffset = 0
		onebitwalktype1(Curfn.Type.Params(), &xoffset, bv)
	}

	off = dbvec(lsym, off, bv)
	if Curfn.Type.NumResults() > 0 {
		xoffset = 0
		onebitwalktype1(Curfn.Type.Results(), &xoffset, bv)
		off = dbvec(lsym, off, bv)
	}

	ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
}

// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// Sort the list of stack variables: autos after anything else;
// within autos, unused after used; within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out in decreasing addresses
// on the stack, pointers first, zeroed things first and decreasing size
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
func cmpstackvarlt(a, b *Node) bool {
	if (a.Class() == PAUTO) != (b.Class() == PAUTO) {
		return b.Class() == PAUTO
	}

	if a.Class() != PAUTO {
		return a.Xoffset < b.Xoffset
	}

	if a.Name.Used() != b.Name.Used() {
		return a.Name.Used()
	}

	ap := types.Haspointers(a.Type)
	bp := types.Haspointers(b.Type)
	if ap != bp {
		return ap
	}

	ap = a.Name.Needzero()
	bp = b.Name.Needzero()
	if ap != bp {
		return ap
	}

	if a.Type.Width != b.Type.Width {
		return a.Type.Width > b.Type.Width
	}

	return a.Sym.Name < b.Sym.Name
}

// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*Node

func (s byStackVar) Len() int           { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func (s *ssafn) AllocFrame(f *ssa.Func) {
	s.stksize = 0
	s.stkptrsize = 0
	fn := s.curfn.Func

	// Mark the PAUTOs unused.
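	// What follows is effectively mark-and-sweep over fn.Dcl: the
	// loops below re-mark every auto that register allocation or the
	// Values' Aux symbols still reference, then the sort moves any
	// still-unused autos to the tail of fn.Dcl, where the
	// offset-assignment loop truncates them away.
	//
	// As an illustrative example (hypothetical variables, 64-bit
	// sizes): given used autos p *T and buf [64]byte plus an unused
	// auto tmp int, cmpstackvarlt orders them p, buf, tmp — p first
	// because it contains a pointer, tmp last because it is unused
	// and about to be dropped.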
	for _, ln := range fn.Dcl {
		if ln.Class() == PAUTO {
			ln.Name.SetUsed(false)
		}
	}

	for _, l := range f.RegAlloc {
		if ls, ok := l.(ssa.LocalSlot); ok {
			ls.N.(*Node).Name.SetUsed(true)
		}
	}

	scratchUsed := false
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			switch a := v.Aux.(type) {
			case *ssa.ArgSymbol:
				n := a.Node.(*Node)
				// Don't modify nodfp; it is a global.
				if n != nodfp {
					n.Name.SetUsed(true)
				}
			case *ssa.AutoSymbol:
				a.Node.(*Node).Name.SetUsed(true)
			}

			if !scratchUsed {
				scratchUsed = v.Op.UsesScratch()
			}
		}
	}

	if f.Config.NeedsFpScratch && scratchUsed {
		s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[TUINT64])
	}

	sort.Sort(byStackVar(fn.Dcl))

	// Reassign stack offsets of the locals that are used.
	for i, n := range fn.Dcl {
		if n.Op != ONAME || n.Class() != PAUTO {
			continue
		}
		if !n.Name.Used() {
			fn.Dcl = fn.Dcl[:i]
			break
		}

		dowidth(n.Type)
		w := n.Type.Width
		if w >= thearch.MAXWIDTH || w < 0 {
			Fatalf("bad width")
		}
		s.stksize += w
		s.stksize = Rnd(s.stksize, int64(n.Type.Align))
		if types.Haspointers(n.Type) {
			s.stkptrsize = s.stksize
		}
		if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
			s.stksize = Rnd(s.stksize, int64(Widthptr))
		}
		n.Xoffset = -s.stksize
	}

	s.stksize = Rnd(s.stksize, int64(Widthreg))
	s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}

func compile(fn *Node) {
	Curfn = fn
	dowidth(fn.Type)

	if fn.Nbody.Len() == 0 {
		emitptrargsmap()
		return
	}

	saveerrors()

	order(fn)
	if nerrors != 0 {
		return
	}

	walk(fn)
	if nerrors != 0 {
		return
	}
	if instrumenting {
		instrument(fn)
	}

	// From this point, there should be no uses of Curfn. Enforce that.
	Curfn = nil

	// Set up the function's LSym early to avoid data races with the assemblers.
	fn.Func.initLSym()

	if compilenow() {
		compileSSA(fn, 0)
	} else {
		compilequeue = append(compilequeue, fn)
	}
}

// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow() bool {
	return nBackendWorkers == 1 && Debug_compilelater == 0
}

const maxStackSize = 1 << 31

// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *Node, worker int) {
	ssafn := buildssa(fn, worker)
	pp := newProgs(fn, worker)
	genssa(ssafn, pp)
	if pp.Text.To.Offset < maxStackSize {
		pp.Flush()
	} else {
		largeStackFramesMu.Lock()
		largeStackFrames = append(largeStackFrames, fn.Pos)
		largeStackFramesMu.Unlock()
	}
	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
	pp.Free()
}

func init() {
	if raceEnabled {
		rand.Seed(time.Now().UnixNano())
	}
}

// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
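//
// The fan-out is the standard buffered-channel worker-pool pattern:
// each worker drains the channel until it is closed, and a
// sync.WaitGroup blocks until every worker has exited. A minimal
// standalone sketch of the same pattern (work is a hypothetical
// stand-in for compileSSA):
//
//	c := make(chan *Node, nBackendWorkers)
//	var wg sync.WaitGroup
//	for i := 0; i < nBackendWorkers; i++ {
//		wg.Add(1)
//		go func(worker int) {
//			defer wg.Done()
//			for fn := range c {
//				work(fn, worker)
//			}
//		}(i)
//	}
//	for _, fn := range queue {
//		c <- fn
//	}
//	close(c)
//	wg.Wait()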
func compileFunctions() {
	if len(compilequeue) != 0 {
		sizeCalculationDisabled = true // not safe to calculate sizes concurrently
		if raceEnabled {
			// Randomize compilation order to try to shake out races.
			tmp := make([]*Node, len(compilequeue))
			perm := rand.Perm(len(compilequeue))
			for i, v := range perm {
				tmp[v] = compilequeue[i]
			}
			copy(compilequeue, tmp)
		} else {
			// Compile the longest functions first,
			// since they're most likely to be the slowest.
			// This helps avoid stragglers.
			obj.SortSlice(compilequeue, func(i, j int) bool {
				return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
			})
		}
		var wg sync.WaitGroup
		c := make(chan *Node, nBackendWorkers)
		for i := 0; i < nBackendWorkers; i++ {
			wg.Add(1)
			go func(worker int) {
				for fn := range c {
					compileSSA(fn, worker)
				}
				wg.Done()
			}(i)
		}
		for _, fn := range compilequeue {
			c <- fn
		}
		close(c)
		compilequeue = nil
		wg.Wait()
		sizeCalculationDisabled = false
	}
}

func debuginfo(fnsym *obj.LSym, curfn interface{}) []dwarf.Scope {
	fn := curfn.(*Node)
	debugInfo := fn.Func.DebugInfo
	fn.Func.DebugInfo = nil
	if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
		Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
	}

	var automDecls []*Node
	// Populate Automs for fn.
	for _, n := range fn.Func.Dcl {
		if n.Op != ONAME { // might be OTYPE or OLITERAL
			continue
		}
		var name obj.AddrName
		switch n.Class() {
		case PAUTO:
			if !n.Name.Used() {
				Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
			}
			name = obj.NAME_AUTO
		case PPARAM, PPARAMOUT:
			name = obj.NAME_PARAM
		default:
			continue
		}
		automDecls = append(automDecls, n)
		gotype := ngotype(n).Linksym()
		fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{
			Asym:    Ctxt.Lookup(n.Sym.Name),
			Aoffset: int32(n.Xoffset),
			Name:    name,
			Gotype:  gotype,
		})
	}

	var dwarfVars []*dwarf.Var
	var decls []*Node
	if Ctxt.Flag_locationlists && Ctxt.Flag_optimize {
		decls, dwarfVars = createComplexVars(fn, debugInfo)
	} else {
		decls, dwarfVars = createSimpleVars(automDecls)
	}

	var varScopes []ScopeID
	for _, decl := range decls {
		var scope ScopeID
		if !decl.Name.Captured() && !decl.Name.Byval() {
			// n.Pos of captured variables is their first
			// use in the closure but they should always
			// be assigned to scope 0 instead.
			// TODO(mdempsky): Verify this.
			scope = findScope(fn.Func.Marks, decl.Pos)
		}
		varScopes = append(varScopes, scope)
	}
	return assembleScopes(fnsym, fn, dwarfVars, varScopes)
}

// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
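//
// The stack offsets recorded here are relative to the virtual frame
// pointer: autos are biased down by the return-address word on
// architectures where FixedFrameSize() is zero, and by another word
// when the frame pointer is enabled, while parameters are biased up
// past the fixed frame area. For example (illustrative numbers only),
// with FixedFrameSize() == 0, the frame pointer enabled, and
// Widthptr == 8, an auto at Xoffset -16 is emitted with
// StackOffset -32.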
func createSimpleVars(automDecls []*Node) ([]*Node, []*dwarf.Var) {
	var vars []*dwarf.Var
	var decls []*Node
	for _, n := range automDecls {
		if n.IsAutoTmp() {
			continue
		}
		var abbrev int
		offs := n.Xoffset

		switch n.Class() {
		case PAUTO:
			abbrev = dwarf.DW_ABRV_AUTO
			if Ctxt.FixedFrameSize() == 0 {
				offs -= int64(Widthptr)
			}
			if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
				offs -= int64(Widthptr)
			}

		case PPARAM, PPARAMOUT:
			abbrev = dwarf.DW_ABRV_PARAM
			offs += Ctxt.FixedFrameSize()
		default:
			Fatalf("createSimpleVars unexpected type %v for node %v", n.Class(), n)
		}

		typename := dwarf.InfoPrefix + typesymname(n.Type)
		decls = append(decls, n)
		vars = append(vars, &dwarf.Var{
			Name:        n.Sym.Name,
			Abbrev:      abbrev,
			StackOffset: int32(offs),
			Type:        Ctxt.Lookup(typename),
			DeclLine:    n.Pos.Line(),
		})
	}
	return decls, vars
}

type varPart struct {
	varOffset int64
	slot      ssa.SlotID
	locs      ssa.VarLocList
}

func createComplexVars(fn *Node, debugInfo *ssa.FuncDebug) ([]*Node, []*dwarf.Var) {
	for _, locList := range debugInfo.Variables {
		for _, loc := range locList.Locations {
			if loc.StartProg != nil {
				loc.StartPC = loc.StartProg.Pc
			}
			if loc.EndProg != nil {
				loc.EndPC = loc.EndProg.Pc
			}
			if Debug_locationlist == 0 {
				loc.EndProg = nil
				loc.StartProg = nil
			}
		}
	}

	// Group SSA variables by the user variable they were decomposed from.
	varParts := map[*Node][]varPart{}
	for slotID, locList := range debugInfo.Variables {
		if len(locList.Locations) == 0 {
			continue
		}
		slot := debugInfo.Slots[slotID]
		for slot.SplitOf != nil {
			slot = slot.SplitOf
		}
		n := slot.N.(*Node)
		varParts[n] = append(varParts[n], varPart{varOffset(slot), ssa.SlotID(slotID), locList})
	}

	// Produce a DWARF variable entry for each user variable.
	// Don't iterate over the map -- that's nondeterministic, and
	// createComplexVar has side effects. Instead, go by slot.
	var decls []*Node
	var vars []*dwarf.Var
	for _, slot := range debugInfo.Slots {
		for slot.SplitOf != nil {
			slot = slot.SplitOf
		}
		n := slot.N.(*Node)
		parts := varParts[n]
		if parts == nil {
			continue
		}
		// Don't work on this variable again, no matter how many slots it has.
		delete(varParts, n)

		// Get the order the parts need to be in to represent the memory
		// of the decomposed user variable.
		sort.Sort(partsByVarOffset(parts))

		if dvar := createComplexVar(debugInfo, n, parts); dvar != nil {
			decls = append(decls, n)
			vars = append(vars, dvar)
		}
	}
	return decls, vars
}

// varOffset returns the offset of slot within the user variable it was
// decomposed from. This has nothing to do with its stack offset.
func varOffset(slot *ssa.LocalSlot) int64 {
	offset := slot.Off
	for ; slot.SplitOf != nil; slot = slot.SplitOf {
		offset += slot.SplitOffset
	}
	return offset
}

type partsByVarOffset []varPart

func (a partsByVarOffset) Len() int           { return len(a) }
func (a partsByVarOffset) Less(i, j int) bool { return a[i].varOffset < a[j].varOffset }
func (a partsByVarOffset) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// createComplexVar builds a DWARF variable entry and location list representing n.
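//
// n may have been decomposed by the compiler into several SSA parts;
// parts arrives sorted by varOffset, so the pieces reassemble the
// variable's original memory layout. For example (illustrative), a
// string variable s split into s.ptr and s.len arrives as two parts
// with varOffsets 0 and 8 (on 64-bit) and produces a single dwarf.Var
// whose location list covers both pieces. createComplexVar returns
// nil for storage classes that don't get location lists.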
func createComplexVar(debugInfo *ssa.FuncDebug, n *Node, parts []varPart) *dwarf.Var {
	slots := debugInfo.Slots
	var offs int64 // base stack offset for this kind of variable
	var abbrev int
	switch n.Class() {
	case PAUTO:
		abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
		if Ctxt.FixedFrameSize() == 0 {
			offs -= int64(Widthptr)
		}
		if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
			offs -= int64(Widthptr)
		}

	case PPARAM, PPARAMOUT:
		abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
		offs += Ctxt.FixedFrameSize()
	default:
		return nil
	}

	gotype := ngotype(n).Linksym()
	typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
	// The stack offset is used as a sorting key, so for decomposed
	// variables just give it the lowest one. It's not used otherwise.
	stackOffset := debugInfo.Slots[parts[0].slot].N.(*Node).Xoffset + offs
	dvar := &dwarf.Var{
		Name:        n.Sym.Name,
		Abbrev:      abbrev,
		Type:        Ctxt.Lookup(typename),
		StackOffset: int32(stackOffset),
		DeclLine:    n.Pos.Line(),
	}

	if Debug_locationlist != 0 {
		Ctxt.Logf("Building location list for %+v. Parts:\n", n)
		for _, part := range parts {
			Ctxt.Logf("\t%v => %v\n", debugInfo.Slots[part.slot], part.locs)
		}
	}

	// Given a variable that's been decomposed into multiple parts,
	// its location list may need a new entry after the beginning or
	// end of every location entry for each of its parts. For example:
	//
	// [variable]    [pc range]
	// string.ptr    |----|-----|   |----|
	// string.len    |------------| |--|
	// ... needs a location list like:
	// string        |----|-----|-| |--|-|
	//
	// Note that location entries may or may not line up with each other,
	// and some of the result will only have one or the other part.
	//
	// To build the resulting list:
	// - keep a "current" pointer for each part
	// - find the next transition point
	// - advance the current pointer for each part up to that transition point
	// - build the piece for the range between that transition point and the next
	// - repeat

	curLoc := make([]int, len(slots))

	// findBoundaryAfter finds the next beginning or end of a piece after currentPC.
	findBoundaryAfter := func(currentPC int64) int64 {
		min := int64(math.MaxInt64)
		for slot, part := range parts {
			// For each part, find the first PC greater than current. Doesn't
			// matter if it's a start or an end, since we're looking for any boundary.
			// If it's the new winner, save it.
		onePart:
			for i := curLoc[slot]; i < len(part.locs.Locations); i++ {
				for _, pc := range [2]int64{part.locs.Locations[i].StartPC, part.locs.Locations[i].EndPC} {
					if pc > currentPC {
						if pc < min {
							min = pc
						}
						break onePart
					}
				}
			}
		}
		return min
	}
	var start int64
	end := findBoundaryAfter(0)
	for {
		// Advance to the next chunk.
		start = end
		end = findBoundaryAfter(start)
		if end == math.MaxInt64 {
			break
		}

		dloc := dwarf.Location{StartPC: start, EndPC: end}
		if Debug_locationlist != 0 {
			Ctxt.Logf("Processing range %x -> %x\n", start, end)
		}

		// Advance curLoc to the last location that starts before/at start.
		// After this loop, if there's a location that covers [start, end), it will be current.
		// Otherwise the current piece will be too early.
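		// For instance (illustrative PC ranges), if a part's
		// locations cover [0,8) and [16,24) and start is 16, the
		// scan below lands on the second entry; if start is 8, it
		// stays on the first, which the piece assembly then detects
		// as stale (start >= EndPC) and marks missing.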
		for _, part := range parts {
			choice := -1
			for i := curLoc[part.slot]; i < len(part.locs.Locations); i++ {
				if part.locs.Locations[i].StartPC > start {
					break // overshot
				}
				choice = i // best yet
			}
			if choice != -1 {
				curLoc[part.slot] = choice
			}
			if Debug_locationlist != 0 {
				Ctxt.Logf("\t %v => %v", slots[part.slot], curLoc[part.slot])
			}
		}
		if Debug_locationlist != 0 {
			Ctxt.Logf("\n")
		}
		// Assemble the location list entry for this chunk.
		present := 0
		for _, part := range parts {
			dpiece := dwarf.Piece{
				Length: slots[part.slot].Type.Size(),
			}
			locIdx := curLoc[part.slot]
			if locIdx >= len(part.locs.Locations) ||
				start >= part.locs.Locations[locIdx].EndPC ||
				end <= part.locs.Locations[locIdx].StartPC {
				if Debug_locationlist != 0 {
					Ctxt.Logf("\t%v: missing", slots[part.slot])
				}
				dpiece.Missing = true
				dloc.Pieces = append(dloc.Pieces, dpiece)
				continue
			}
			present++
			loc := part.locs.Locations[locIdx]
			if Debug_locationlist != 0 {
				Ctxt.Logf("\t%v: %v", slots[part.slot], loc)
			}
			if loc.OnStack {
				dpiece.OnStack = true
				dpiece.StackOffset = int32(offs + slots[part.slot].Off + slots[part.slot].N.(*Node).Xoffset)
			} else {
				for reg := 0; reg < len(debugInfo.Registers); reg++ {
					if loc.Registers&(1<<uint8(reg)) != 0 {
						dpiece.RegNum = Ctxt.Arch.DWARFRegisters[debugInfo.Registers[reg].ObjNum()]
					}
				}
			}
			dloc.Pieces = append(dloc.Pieces, dpiece)
		}
		if present == 0 {
			if Debug_locationlist != 0 {
				Ctxt.Logf(" -> totally missing\n")
			}
			continue
		}
		// Extend the previous entry if possible.
		if len(dvar.LocationList) > 0 {
			prev := &dvar.LocationList[len(dvar.LocationList)-1]
			if prev.EndPC == dloc.StartPC && len(prev.Pieces) == len(dloc.Pieces) {
				equal := true
				for i := range prev.Pieces {
					if prev.Pieces[i] != dloc.Pieces[i] {
						equal = false
					}
				}
				if equal {
					prev.EndPC = end
					if Debug_locationlist != 0 {
						Ctxt.Logf("-> merged with previous, now %#v\n", prev)
					}
					continue
				}
			}
		}
		dvar.LocationList = append(dvar.LocationList, dloc)
		if Debug_locationlist != 0 {
			Ctxt.Logf("-> added: %#v\n", dloc)
		}
	}
	return dvar
}

// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
	if fnsym == nil {
		return
	}
	if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
		return
	}

	trackSyms := make([]*types.Sym, 0, len(tracked))
	for sym := range tracked {
		trackSyms = append(trackSyms, sym)
	}
	sort.Sort(symByName(trackSyms))
	for _, sym := range trackSyms {
		r := obj.Addrel(fnsym)
		r.Sym = sym.Linksym()
		r.Type = objabi.R_USEFIELD
	}
}

type symByName []*types.Sym

func (a symByName) Len() int           { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
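
// A brief illustration (hypothetical symbols) of why fieldtrack sorts
// before emitting relocations: Go map iteration order is randomized,
// so copying the tracked set into a slice and sorting it by name keeps
// the R_USEFIELD relocations, and hence the object file, reproducible
// across builds.
//
//	tracked := map[*types.Sym]struct{}{symB: {}, symA: {}}
//	// After copying into a slice and sort.Sort(symByName(...)),
//	// relocations are always emitted in the order symA, symB.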