github.com/bir3/gocompiler@v0.3.205/src/cmd/link/internal/ppc64/asm.go (about) 1 // Inferno utils/5l/asm.c 2 // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/asm.c 3 // 4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) 6 // Portions Copyright © 1997-1999 Vita Nuova Limited 7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) 8 // Portions Copyright © 2004,2006 Bruce Ellis 9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) 10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others 11 // Portions Copyright © 2009 The Go Authors. All rights reserved. 12 // 13 // Permission is hereby granted, free of charge, to any person obtaining a copy 14 // of this software and associated documentation files (the "Software"), to deal 15 // in the Software without restriction, including without limitation the rights 16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 // copies of the Software, and to permit persons to whom the Software is 18 // furnished to do so, subject to the following conditions: 19 // 20 // The above copyright notice and this permission notice shall be included in 21 // all copies or substantial portions of the Software. 22 // 23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 // THE SOFTWARE. 
package ppc64

import (
	"github.com/bir3/gocompiler/src/cmd/internal/objabi"
	"github.com/bir3/gocompiler/src/cmd/internal/sys"
	"github.com/bir3/gocompiler/src/cmd/link/internal/ld"
	"github.com/bir3/gocompiler/src/cmd/link/internal/loader"
	"github.com/bir3/gocompiler/src/cmd/link/internal/sym"
	"debug/elf"
	"encoding/binary"
	"fmt"
	"log"
	"strconv"
	"strings"
)

// genpltstub reserves a PLT slot for the dynamic symbol targeted by
// relocation r (found in symbol s), creates the call stub for it if one
// does not already exist, retargets r at the stub, and rewrites the nop
// following the call site into a TOC-restore instruction. It returns the
// stub symbol and whether the stub was newly created by this call.
func genpltstub(ctxt *ld.Link, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (sym loader.Sym, firstUse bool) {
	// The ppc64 ABI PLT has similar concepts to other
	// architectures, but is laid out quite differently. When we
	// see an R_PPC64_REL24 relocation to a dynamic symbol
	// (indicating that the call needs to go through the PLT), we
	// generate up to three stubs and reserve a PLT slot.
	//
	// 1) The call site will be bl x; nop (where the relocation
	//    applies to the bl). We rewrite this to bl x_stub; ld
	//    r2,24(r1). The ld is necessary because x_stub will save
	//    r2 (the TOC pointer) at 24(r1) (the "TOC save slot").
	//
	// 2) We reserve space for a pointer in the .plt section (once
	//    per referenced dynamic function). .plt is a data
	//    section filled solely by the dynamic linker (more like
	//    .plt.got on other architectures). Initially, the
	//    dynamic linker will fill each slot with a pointer to the
	//    corresponding x@plt entry point.
	//
	// 3) We generate the "call stub" x_stub (once per dynamic
	//    function/object file pair). This saves the TOC in the
	//    TOC save slot, reads the function pointer from x's .plt
	//    slot and calls it like any other global entry point
	//    (including setting r12 to the function address).
	//
	// 4) We generate the "symbol resolver stub" x@plt (once per
	//    dynamic function). This is solely a branch to the glink
	//    resolver stub.
	//
	// 5) We generate the glink resolver stub (only once). This
	//    computes which symbol resolver stub we came through and
	//    invokes the dynamic resolver via a pointer provided by
	//    the dynamic linker. This will patch up the .plt slot to
	//    point directly at the function so future calls go
	//    straight from the call stub to the real function, and
	//    then call the function.

	// NOTE: It's possible we could make ppc64 closer to other
	// architectures: ppc64's .plt is like .plt.got on other
	// platforms and ppc64's .glink is like .plt on other
	// platforms.

	// Find all R_PPC64_REL24 relocations that reference dynamic
	// imports. Reserve PLT entries for these symbols and
	// generate call stubs. The call stubs need to live in .text,
	// which is why we need to do this pass this early.
	//
	// This assumes "case 1" from the ABI, where the caller needs
	// us to save and restore the TOC pointer.

	// Reserve PLT entry and generate symbol resolver.
	addpltsym(ctxt, ldr, r.Sym())

	// Generate call stub. Important to note that we're looking
	// up the stub using the same version as the parent symbol (s),
	// needed so that symtoc() will select the right .TOC. symbol
	// when processing the stub. In older versions of the linker
	// this was done by setting stub.Outer to the parent, but
	// if the stub has the right version initially this is not needed.
	n := fmt.Sprintf("%s.%s", ldr.SymName(s), ldr.SymName(r.Sym()))
	stub := ldr.CreateSymForUpdate(n, ldr.SymVersion(s))
	firstUse = stub.Size() == 0
	if firstUse {
		gencallstub(ctxt, ldr, 1, stub, r.Sym())
	}

	// Update the relocation to use the call stub.
	r.SetSym(stub.Sym())

	// Make the symbol writeable so we can fixup toc.
	su := ldr.MakeSymbolUpdater(s)
	su.MakeWritable()
	p := su.Data()

	// Check for toc restore slot (a nop), and replace with toc restore.
	var nop uint32
	if len(p) >= int(r.Off()+8) {
		nop = ctxt.Arch.ByteOrder.Uint32(p[r.Off()+4:])
	}
	if nop != 0x60000000 {
		ldr.Errorf(s, "Symbol %s is missing toc restoration slot at offset %d", ldr.SymName(s), r.Off()+4)
	}
	const o1 = 0xe8410018 // ld r2,24(r1)
	ctxt.Arch.ByteOrder.PutUint32(p[r.Off()+4:], o1)

	return stub.Sym(), firstUse
}

// Scan relocs and generate PLT stubs and generate/fixup ABI defined functions created by the linker.
func genstubs(ctxt *ld.Link, ldr *loader.Loader) {
	var stubs []loader.Sym
	var abifuncs []loader.Sym
	for _, s := range ctxt.Textp {
		relocs := ldr.Relocs(s)
		for i := 0; i < relocs.Count(); i++ {
			r := relocs.At(i)
			switch r.Type() {
			case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24):
				switch ldr.SymType(r.Sym()) {
				case sym.SDYNIMPORT:
					// This call goes through the PLT, generate and call through a PLT stub.
					if sym, firstUse := genpltstub(ctxt, ldr, r, s); firstUse {
						stubs = append(stubs, sym)
					}

				case sym.SXREF:
					// Is this an ELF ABI defined function which is (in practice)
					// generated by the linker to save/restore callee save registers?
					// These are defined similarly for both PPC64 ELF and ELFv2.
					targName := ldr.SymName(r.Sym())
					if strings.HasPrefix(targName, "_save") || strings.HasPrefix(targName, "_rest") {
						if sym, firstUse := rewriteABIFuncReloc(ctxt, ldr, targName, r); firstUse {
							abifuncs = append(abifuncs, sym)
						}
					}
				}

			// Handle objects compiled with -fno-plt. Rewrite local calls to avoid indirect calling.
			// These are 0 sized relocs. They mark the mtctr r12, or bctrl + ld r2,24(r1).
			case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_PLTSEQ):
				if ldr.SymType(r.Sym()) == sym.STEXT {
					// This should be an mtctr instruction. Turn it into a nop.
					su := ldr.MakeSymbolUpdater(s)
					const OP_MTCTR = 31<<26 | 0x9<<16 | 467<<1
					const MASK_OP_MTCTR = 63<<26 | 0x3FF<<11 | 0x1FF<<1
					rewritetonop(&ctxt.Target, ldr, su, int64(r.Off()), MASK_OP_MTCTR, OP_MTCTR)
				}
			case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_PLTCALL):
				if ldr.SymType(r.Sym()) == sym.STEXT {
					// This relocation should point to a bctrl followed by a ld r2, 24(r1).
					const OP_BL = 0x48000001         // bl 0
					const OP_TOCRESTORE = 0xe8410018 // ld r2,24(r1)
					const OP_BCTRL = 0x4e800421     // bctrl

					// Convert the bctrl into a bl.
					su := ldr.MakeSymbolUpdater(s)
					rewritetoinsn(&ctxt.Target, ldr, su, int64(r.Off()), 0xFFFFFFFF, OP_BCTRL, OP_BL)

					// Turn this reloc into an R_CALLPOWER, and convert the TOC restore into a nop.
					su.SetRelocType(i, objabi.R_CALLPOWER)
					su.SetRelocAdd(i, r.Add()+int64(ldr.SymLocalentry(r.Sym())))
					r.SetSiz(4)
					rewritetonop(&ctxt.Target, ldr, su, int64(r.Off()+4), 0xFFFFFFFF, OP_TOCRESTORE)
				}
			}
		}
	}

	// Append any usage of the go versions of ELF save/restore
	// functions to the end of the callstub list to minimize
	// chances a trampoline might be needed.
	stubs = append(stubs, abifuncs...)

	// Put stubs at the beginning (instead of the end).
	// So when resolving the relocations to calls to the stubs,
	// the addresses are known and trampolines can be inserted
	// when necessary.
	ctxt.Textp = append(stubs, ctxt.Textp...)
}

// genaddmoduledata emits an init function that calls
// runtime.addmoduledata with this module's moduledata symbol. Each
// o(...) call below appends one 4-byte instruction word; the relocations
// added alongside fill in the address halves at link time.
func genaddmoduledata(ctxt *ld.Link, ldr *loader.Loader) {
	initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt)
	if initfunc == nil {
		return
	}

	// o appends one instruction word to initfunc.
	o := func(op uint32) {
		initfunc.AddUint32(ctxt.Arch, op)
	}

	// addis r2, r12, .TOC.-func@ha
	toc := ctxt.DotTOC[0]
	rel1, _ := initfunc.AddRel(objabi.R_ADDRPOWER_PCREL)
	rel1.SetOff(0)
	rel1.SetSiz(8)
	rel1.SetSym(toc)
	o(0x3c4c0000)
	// addi r2, r2, .TOC.-func@l
	o(0x38420000)
	// mflr r31
	o(0x7c0802a6)
	// stdu r31, -32(r1)
	o(0xf801ffe1)
	// addis r3, r2, local.moduledata@got@ha
	//
	// Pick the moduledata symbol for this link: a plain build has
	// local.moduledata, plugins have local.pluginmoduledata, and
	// otherwise fall back to runtime.firstmoduledata.
	var tgt loader.Sym
	if s := ldr.Lookup("local.moduledata", 0); s != 0 {
		tgt = s
	} else if s := ldr.Lookup("local.pluginmoduledata", 0); s != 0 {
		tgt = s
	} else {
		tgt = ldr.LookupOrCreateSym("runtime.firstmoduledata", 0)
	}
	rel2, _ := initfunc.AddRel(objabi.R_ADDRPOWER_GOT)
	rel2.SetOff(int32(initfunc.Size()))
	rel2.SetSiz(8)
	rel2.SetSym(tgt)
	o(0x3c620000)
	// ld r3, local.moduledata@got@l(r3)
	o(0xe8630000)
	// bl runtime.addmoduledata
	rel3, _ := initfunc.AddRel(objabi.R_CALLPOWER)
	rel3.SetOff(int32(initfunc.Size()))
	rel3.SetSiz(4)
	rel3.SetSym(addmoduledata)
	o(0x48000001)
	// nop
	o(0x60000000)
	// ld r31, 0(r1)
	o(0xe8010000)
	// mtlr r31
	o(0x7c0803a6)
	// addi r1,r1,32
	o(0x38210020)
	// blr
	o(0x4e800020)
}

// Rewrite ELF (v1 or v2) calls to _savegpr0_n, _savegpr1_n, _savefpr_n, _restfpr_n, _savevr_m, or
// _restvr_m (14<=n<=31, 20<=m<=31). Redirect them to runtime.elf_restgpr0+(n-14)*4,
// runtime.elf_restvr+(m-20)*8, and similar.
//
// These functions are defined in the ELFv2 ABI (generated when using gcc -Os option) to save and
// restore callee-saved registers (as defined in the PPC64 ELF ABIs) from registers n or m to 31 of
// the named type.
// R12 and R0 are sometimes used in exceptional ways described in the ABI.
//
// Final note, this is only needed when linking internally. The external linker will generate these
// functions if they are used.
func rewriteABIFuncReloc(ctxt *ld.Link, ldr *loader.Loader, tname string, r loader.Reloc) (sym loader.Sym, firstUse bool) {
	s := strings.Split(tname, "_")
	// A valid call will split like {"", "savegpr0", "20"}
	if len(s) != 3 {
		return 0, false // Not an abi func.
	}
	minReg := 14 // _savegpr0_{n}, _savegpr1_{n}, _savefpr_{n}, 14 <= n <= 31
	offMul := 4  // 1 instruction per register op.
	switch s[1] {
	case "savegpr0", "savegpr1", "savefpr":
	case "restgpr0", "restgpr1", "restfpr":
	case "savevr", "restvr":
		minReg = 20 // _savevr_{n} or _restvr_{n}, 20 <= n <= 31
		offMul = 8  // 2 instructions per register op.
	default:
		return 0, false // Not an abi func
	}
	n, e := strconv.Atoi(s[2])
	if e != nil || n < minReg || n > 31 || r.Add() != 0 {
		return 0, false // Invalid register number, or non-zero addend. Not an abi func.
	}

	// tname is a valid relocation to an ABI defined register save/restore function. Re-relocate
	// them to a go version of these functions in runtime/asm_ppc64x.s
	ts := ldr.LookupOrCreateSym("runtime.elf_"+s[1], 0)
	r.SetSym(ts)
	r.SetAdd(int64((n - minReg) * offMul))
	firstUse = !ldr.AttrReachable(ts)
	if firstUse {
		ldr.SetAttrReachable(ts, true)
		// This function only becomes reachable now. It has been dropped from
		// the text section (it was unreachable until now); it needs to be included.
		//
		// Similarly, TOC regeneration should not happen for these functions,
		// remove it from this save/restore function.
		if ldr.AttrShared(ts) {
			sb := ldr.MakeSymbolUpdater(ts)
			// Strip the 8-byte TOC regeneration prologue.
			sb.SetData(sb.Data()[8:])
			sb.SetSize(sb.Size() - 8)
			relocs := sb.Relocs()
			// Only one PCREL reloc to .TOC. should be present.
			if relocs.Count() != 1 {
				log.Fatalf("Unexpected number of relocs in %s\n", ldr.SymName(ts))
			}
			sb.ResetRelocs()

		}
	}
	return ts, firstUse
}

// gentext is the ppc64 hook for generating linker-created text:
// the addmoduledata init function (when dynamically linking Go) and,
// for internal linking, PLT/ABI stubs.
func gentext(ctxt *ld.Link, ldr *loader.Loader) {
	if ctxt.DynlinkingGo() {
		genaddmoduledata(ctxt, ldr)
	}

	if ctxt.LinkMode == ld.LinkInternal {
		genstubs(ctxt, ldr)
	}
}

// Construct a call stub in stub that calls symbol targ via its PLT
// entry.
func gencallstub(ctxt *ld.Link, ldr *loader.Loader, abicase int, stub *loader.SymbolBuilder, targ loader.Sym) {
	if abicase != 1 {
		// If we see R_PPC64_TOCSAVE or R_PPC64_REL24_NOTOC
		// relocations, we'll need to implement cases 2 and 3.
		log.Fatalf("gencallstub only implements case 1 calls")
	}

	plt := ctxt.PLT

	stub.SetType(sym.STEXT)

	// Save TOC pointer in TOC save slot
	stub.AddUint32(ctxt.Arch, 0xf8410018) // std r2,24(r1)

	// Load the function pointer from the PLT.
	// The two 2-byte relocations below fill in the HA/LO halves of the
	// PLT slot offset; on big-endian the half-word sits in the upper
	// bytes of the instruction, hence the offset adjustment.
	rel, ri1 := stub.AddRel(objabi.R_POWER_TOC)
	rel.SetOff(int32(stub.Size()))
	rel.SetSiz(2)
	rel.SetAdd(int64(ldr.SymPlt(targ)))
	rel.SetSym(plt)
	if ctxt.Arch.ByteOrder == binary.BigEndian {
		rel.SetOff(rel.Off() + int32(rel.Siz()))
	}
	ldr.SetRelocVariant(stub.Sym(), int(ri1), sym.RV_POWER_HA)
	stub.AddUint32(ctxt.Arch, 0x3d820000) // addis r12,r2,targ@plt@toc@ha

	rel2, ri2 := stub.AddRel(objabi.R_POWER_TOC)
	rel2.SetOff(int32(stub.Size()))
	rel2.SetSiz(2)
	rel2.SetAdd(int64(ldr.SymPlt(targ)))
	rel2.SetSym(plt)
	if ctxt.Arch.ByteOrder == binary.BigEndian {
		rel2.SetOff(rel2.Off() + int32(rel2.Siz()))
	}
	ldr.SetRelocVariant(stub.Sym(), int(ri2), sym.RV_POWER_LO)
	stub.AddUint32(ctxt.Arch, 0xe98c0000) // ld r12,targ@plt@toc@l(r12)

	// Jump to the loaded pointer
	stub.AddUint32(ctxt.Arch, 0x7d8903a6) // mtctr r12
	stub.AddUint32(ctxt.Arch, 0x4e800420) // bctr
}

// Rewrite the instruction at offset
// into newinsn. Also, verify the
// existing instruction under mask matches the check value.
func rewritetoinsn(target *ld.Target, ldr *loader.Loader, su *loader.SymbolBuilder, offset int64, mask, check, newinsn uint32) {
	su.MakeWritable()
	op := target.Arch.ByteOrder.Uint32(su.Data()[offset:])
	if op&mask != check {
		ldr.Errorf(su.Sym(), "Rewrite offset 0x%x to 0x%08X failed check (0x%08X&0x%08X != 0x%08X)", offset, newinsn, op, mask, check)
	}
	su.SetUint32(target.Arch, offset, newinsn)
}

// Rewrite the instruction at offset into a hardware nop instruction. Also, verify the
// existing instruction under mask matches the check value.
func rewritetonop(target *ld.Target, ldr *loader.Loader, su *loader.SymbolBuilder, offset int64, mask, check uint32) {
	const NOP = 0x60000000
	rewritetoinsn(target, ldr, su, offset, mask, check, NOP)
}

// adddynrel dispatches dynamic-relocation handling by target format:
// ELF relocations are processed here, AIX/XCOFF relocations are handed
// to the shared xcoff code. Returns false for unsupported targets.
func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool {
	if target.IsElf() {
		return addelfdynrel(target, ldr, syms, s, r, rIdx)
	} else if target.IsAIX() {
		return ld.Xcoffadddynrel(target, ldr, syms, s, r, rIdx)
	}
	return false
}

// addelfdynrel converts ELF relocations read from object files into the
// linker's internal relocation types, and emits dynamic relocations
// (.rela entries) where static resolution is not possible. Returns true
// when the relocation was fully handled.
func addelfdynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool {
	targ := r.Sym()
	var targType sym.SymKind
	if targ != 0 {
		targType = ldr.SymType(targ)
	}

	switch r.Type() {
	default:
		if r.Type() >= objabi.ElfRelocOffset {
			ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type()))
			return false
		}

	// Handle relocations found in ELF object files.
	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL24):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_CALLPOWER)

		// This is a local call, so the caller isn't setting
		// up r12 and r2 is the same for the caller and
		// callee. Hence, we need to go to the local entry
		// point. (If we don't do this, the callee will try
		// to use r12 to compute r2.)
		su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymLocalentry(targ)))

		if targType == sym.SDYNIMPORT {
			// Should have been handled in elfsetupplt
			ldr.Errorf(s, "unexpected R_PPC64_REL24 for dyn import")
		}

		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC_REL32):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_PCREL)
		su.SetRelocAdd(rIdx, r.Add()+4)

		if targType == sym.SDYNIMPORT {
			ldr.Errorf(s, "unexpected R_PPC_REL32 for dyn import")
		}

		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_ADDR64):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_ADDR)
		if targType == sym.SDYNIMPORT {
			// These happen in .toc sections
			ld.Adddynsym(ldr, target, syms, targ)

			rela := ldr.MakeSymbolUpdater(syms.Rela)
			rela.AddAddrPlus(target.Arch, s, int64(r.Off()))
			rela.AddUint64(target.Arch, elf.R_INFO(uint32(ldr.SymDynid(targ)), uint32(elf.R_PPC64_ADDR64)))
			rela.AddUint64(target.Arch, uint64(r.Add()))
			su.SetRelocType(rIdx, objabi.ElfRelocOffset) // ignore during relocsym
		} else if target.IsPIE() && target.IsInternal() {
			// For internal linking PIE, this R_ADDR relocation cannot
			// be resolved statically. We need to generate a dynamic
			// relocation. Let the code below handle it.
			break
		}
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_POWER_TOC)
		ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_LO|sym.RV_CHECK_OVERFLOW)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_LO):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_POWER_TOC)
		ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_LO)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_HA):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_POWER_TOC)
		ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HA|sym.RV_CHECK_OVERFLOW)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_HI):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_POWER_TOC)
		ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HI|sym.RV_CHECK_OVERFLOW)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_DS):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_POWER_TOC)
		ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_DS|sym.RV_CHECK_OVERFLOW)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_TOC16_LO_DS):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_POWER_TOC)
		ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_DS)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL16_LO):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_PCREL)
		ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_LO)
		su.SetRelocAdd(rIdx, r.Add()+2) // Compensate for relocation size of 2
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL16_HI):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_PCREL)
		ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HI|sym.RV_CHECK_OVERFLOW)
		su.SetRelocAdd(rIdx, r.Add()+2) // Compensate for relocation size of 2
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_REL16_HA):
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_PCREL)
		ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HA|sym.RV_CHECK_OVERFLOW)
		su.SetRelocAdd(rIdx, r.Add()+2) // Compensate for relocation size of 2
		return true

	// When compiling with gcc's -fno-plt option (no PLT), the following code and relocation
	// sequences may be present to call an external function:
	//
	//   1. addis Rx,foo@R_PPC64_PLT16_HA
	//   2. ld 12,foo@R_PPC64_PLT16_LO_DS(Rx)
	//   3. mtctr 12 ; foo@R_PPC64_PLTSEQ
	//   4. bctrl ; foo@R_PPC64_PLTCALL
	//   5. ld r2,24(r1)
	//
	// Note, 5 is required to follow the R_PPC64_PLTCALL. Similarly, relocations targeting
	// instructions 3 and 4 are zero sized informational relocations.
	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_PLT16_HA),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_PPC64_PLT16_LO_DS):
		su := ldr.MakeSymbolUpdater(s)
		isPLT16_LO_DS := r.Type() == objabi.ElfRelocOffset+objabi.RelocType(elf.R_PPC64_PLT16_LO_DS)
		if isPLT16_LO_DS {
			ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_DS)
		} else {
			ldr.SetRelocVariant(s, rIdx, sym.RV_POWER_HA|sym.RV_CHECK_OVERFLOW)
		}
		su.SetRelocType(rIdx, objabi.R_POWER_TOC)
		if targType == sym.SDYNIMPORT {
			// This is an external symbol, make space in the GOT and retarget the reloc.
			ld.AddGotSym(target, ldr, syms, targ, uint32(elf.R_PPC64_GLOB_DAT))
			su.SetRelocSym(rIdx, syms.GOT)
			su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ)))
		} else if targType == sym.STEXT {
			// Local calls don't need the GOT load; nop out the
			// addis/ld of the -fno-plt sequence.
			if isPLT16_LO_DS {
				// Expect an ld opcode to nop
				const MASK_OP_LD = 63<<26 | 0x3
				const OP_LD = 58 << 26
				rewritetonop(target, ldr, su, int64(r.Off()), MASK_OP_LD, OP_LD)
			} else {
				// Expect an addis opcode to nop
				const MASK_OP_ADDIS = 63 << 26
				const OP_ADDIS = 15 << 26
				rewritetonop(target, ldr, su, int64(r.Off()), MASK_OP_ADDIS, OP_ADDIS)
			}
			// And we can ignore this reloc now.
			su.SetRelocType(rIdx, objabi.ElfRelocOffset)
		} else {
			ldr.Errorf(s, "unexpected PLT relocation target symbol type %s", targType.String())
		}
		return true
	}

	// Handle references to ELF symbols from our own object files.
	relocs := ldr.Relocs(s)
	r = relocs.At(rIdx)

	switch r.Type() {
	case objabi.R_ADDR:
		if ldr.SymType(s) == sym.STEXT {
			log.Fatalf("R_ADDR relocation in text symbol %s is unsupported\n", ldr.SymName(s))
		}
		if target.IsPIE() && target.IsInternal() {
			// When internally linking, generate dynamic relocations
			// for all typical R_ADDR relocations. The exception
			// are those R_ADDR that are created as part of generating
			// the dynamic relocations and must be resolved statically.
			//
			// There are three phases relevant to understanding this:
			//
			//	dodata()  // we are here
			//	address() // symbol address assignment
			//	reloc()   // resolution of static R_ADDR relocs
			//
			// At this point symbol addresses have not been
			// assigned yet (as the final size of the .rela section
			// will affect the addresses), and so we cannot write
			// the Elf64_Rela.r_offset now. Instead we delay it
			// until after the 'address' phase of the linker is
			// complete. We do this via Addaddrplus, which creates
			// a new R_ADDR relocation which will be resolved in
			// the 'reloc' phase.
			//
			// These synthetic static R_ADDR relocs must be skipped
			// now, or else we will be caught in an infinite loop
			// of generating synthetic relocs for our synthetic
			// relocs.
			//
			// Furthermore, the rela sections contain dynamic
			// relocations with R_ADDR relocations on
			// Elf64_Rela.r_offset. This field should contain the
			// symbol offset as determined by reloc(), not the
			// final dynamically linked address as a dynamic
			// relocation would provide.
			switch ldr.SymName(s) {
			case ".dynsym", ".rela", ".rela.plt", ".got.plt", ".dynamic":
				return false
			}
		} else {
			// Either internally linking a static executable,
			// in which case we can resolve these relocations
			// statically in the 'reloc' phase, or externally
			// linking, in which case the relocation will be
			// prepared in the 'reloc' phase and passed to the
			// external linker in the 'asmb' phase.
			if ldr.SymType(s) != sym.SDATA && ldr.SymType(s) != sym.SRODATA {
				break
			}
		}
		// Generate R_PPC64_RELATIVE relocations for best
		// efficiency in the dynamic linker.
		//
		// As noted above, symbol addresses have not been
		// assigned yet, so we can't generate the final reloc
		// entry yet. We ultimately want:
		//
		//	r_offset = s + r.Off
		//	r_info = R_PPC64_RELATIVE
		//	r_addend = targ + r.Add
		//
		// The dynamic linker will set *offset = base address +
		// addend.
		//
		// AddAddrPlus is used for r_offset and r_addend to
		// generate new R_ADDR relocations that will update
		// these fields in the 'reloc' phase.
		rela := ldr.MakeSymbolUpdater(syms.Rela)
		rela.AddAddrPlus(target.Arch, s, int64(r.Off()))
		if r.Siz() == 8 {
			rela.AddUint64(target.Arch, elf.R_INFO(0, uint32(elf.R_PPC64_RELATIVE)))
		} else {
			ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
		}
		rela.AddAddrPlus(target.Arch, targ, int64(r.Add()))

		// We do not mark r done here, so we still apply it statically
		// and the file content will also have the right offset to the
		// relocation target. That way it can be examined statically
		// (e.g. go version).
		return true
	}

	return false
}

// xcoffreloc1 emits one XCOFF relocation entry (or pair) for r into out.
// Returns false for relocation types/sizes it cannot encode.
func xcoffreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, sectoff int64) bool {
	rs := r.Xsym

	// emitReloc writes one XCOFF relocation record: virtual address,
	// symbol table index, then type/size flags.
	emitReloc := func(v uint16, off uint64) {
		out.Write64(uint64(sectoff) + off)
		out.Write32(uint32(ldr.SymDynid(rs)))
		out.Write16(v)
	}

	var v uint16
	switch r.Type {
	default:
		return false
	case objabi.R_ADDR, objabi.R_DWARFSECREF:
		v = ld.XCOFF_R_POS
		if r.Size == 4 {
			v |= 0x1F << 8
		} else {
			v |= 0x3F << 8
		}
		emitReloc(v, 0)
	// NOTE(review): the R_ADDRPOWER_TOCREL case is empty (Go switch
	// cases do not fall through), so it emits nothing and returns true —
	// verify this is intentional and not a missing fallthrough into the
	// _DS case below.
	case objabi.R_ADDRPOWER_TOCREL:
	case objabi.R_ADDRPOWER_TOCREL_DS:
		emitReloc(ld.XCOFF_R_TOCU|(0x0F<<8), 2)
		emitReloc(ld.XCOFF_R_TOCL|(0x0F<<8), 6)
	case objabi.R_POWER_TLS_LE:
		// This only supports 16b relocations. It is fixed up in archreloc.
		emitReloc(ld.XCOFF_R_TLS_LE|0x0F<<8, 2)
	case objabi.R_CALLPOWER:
		if r.Size != 4 {
			return false
		}
		emitReloc(ld.XCOFF_R_RBR|0x19<<8, 0)
	case objabi.R_XCOFFREF:
		emitReloc(ld.XCOFF_R_REF|0x3F<<8, 0)
	}
	return true

}

// elfreloc1 emits one ELF relocation entry (or a HA/LO pair for split
// relocations) for r into out. Returns false for types/sizes it cannot
// encode.
func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool {
	// Beware that bit0~bit15 start from the third byte of an instruction in Big-Endian machines.
	rt := r.Type
	if rt == objabi.R_ADDR || rt == objabi.R_POWER_TLS || rt == objabi.R_CALLPOWER {
	} else {
		if ctxt.Arch.ByteOrder == binary.BigEndian {
			sectoff += 2
		}
	}
	out.Write64(uint64(sectoff))

	elfsym := ld.ElfSymForReloc(ctxt, r.Xsym)
	switch rt {
	default:
		return false
	case objabi.R_ADDR, objabi.R_DWARFSECREF:
		switch r.Size {
		case 4:
			out.Write64(uint64(elf.R_PPC64_ADDR32) | uint64(elfsym)<<32)
		case 8:
			out.Write64(uint64(elf.R_PPC64_ADDR64) | uint64(elfsym)<<32)
		default:
			return false
		}
	case objabi.R_ADDRPOWER_D34:
		out.Write64(uint64(elf.R_PPC64_D34) | uint64(elfsym)<<32)
	case objabi.R_ADDRPOWER_PCREL34:
		out.Write64(uint64(elf.R_PPC64_PCREL34) | uint64(elfsym)<<32)
	case objabi.R_POWER_TLS:
		out.Write64(uint64(elf.R_PPC64_TLS) | uint64(elfsym)<<32)
	case objabi.R_POWER_TLS_LE:
		out.Write64(uint64(elf.R_PPC64_TPREL16_HA) | uint64(elfsym)<<32)
		out.Write64(uint64(r.Xadd))
		out.Write64(uint64(sectoff + 4))
		out.Write64(uint64(elf.R_PPC64_TPREL16_LO) | uint64(elfsym)<<32)
	case objabi.R_POWER_TLS_LE_TPREL34:
		out.Write64(uint64(elf.R_PPC64_TPREL34) | uint64(elfsym)<<32)
	case objabi.R_POWER_TLS_IE_PCREL34:
		out.Write64(uint64(elf.R_PPC64_GOT_TPREL_PCREL34) | uint64(elfsym)<<32)
	case objabi.R_POWER_TLS_IE:
		out.Write64(uint64(elf.R_PPC64_GOT_TPREL16_HA) | uint64(elfsym)<<32)
		out.Write64(uint64(r.Xadd))
		out.Write64(uint64(sectoff + 4))
		out.Write64(uint64(elf.R_PPC64_GOT_TPREL16_LO_DS) | uint64(elfsym)<<32)
	case objabi.R_ADDRPOWER:
		out.Write64(uint64(elf.R_PPC64_ADDR16_HA) | uint64(elfsym)<<32)
		out.Write64(uint64(r.Xadd))
		out.Write64(uint64(sectoff + 4))
		out.Write64(uint64(elf.R_PPC64_ADDR16_LO) | uint64(elfsym)<<32)
	case objabi.R_ADDRPOWER_DS:
		out.Write64(uint64(elf.R_PPC64_ADDR16_HA) | uint64(elfsym)<<32)
		out.Write64(uint64(r.Xadd))
		out.Write64(uint64(sectoff + 4))
		out.Write64(uint64(elf.R_PPC64_ADDR16_LO_DS) | uint64(elfsym)<<32)
	case objabi.R_ADDRPOWER_GOT:
		out.Write64(uint64(elf.R_PPC64_GOT16_HA) | uint64(elfsym)<<32)
		out.Write64(uint64(r.Xadd))
		out.Write64(uint64(sectoff + 4))
		out.Write64(uint64(elf.R_PPC64_GOT16_LO_DS) | uint64(elfsym)<<32)
	case objabi.R_ADDRPOWER_PCREL:
		out.Write64(uint64(elf.R_PPC64_REL16_HA) | uint64(elfsym)<<32)
		out.Write64(uint64(r.Xadd))
		out.Write64(uint64(sectoff + 4))
		out.Write64(uint64(elf.R_PPC64_REL16_LO) | uint64(elfsym)<<32)
		r.Xadd += 4
	case objabi.R_ADDRPOWER_TOCREL:
		out.Write64(uint64(elf.R_PPC64_TOC16_HA) | uint64(elfsym)<<32)
		out.Write64(uint64(r.Xadd))
		out.Write64(uint64(sectoff + 4))
		out.Write64(uint64(elf.R_PPC64_TOC16_LO) | uint64(elfsym)<<32)
	case objabi.R_ADDRPOWER_TOCREL_DS:
		out.Write64(uint64(elf.R_PPC64_TOC16_HA) | uint64(elfsym)<<32)
		out.Write64(uint64(r.Xadd))
		out.Write64(uint64(sectoff + 4))
		out.Write64(uint64(elf.R_PPC64_TOC16_LO_DS) | uint64(elfsym)<<32)
	case objabi.R_CALLPOWER:
		if r.Size != 4 {
			return false
		}
		out.Write64(uint64(elf.R_PPC64_REL24) | uint64(elfsym)<<32)

	}
	out.Write64(uint64(r.Xadd))

	return true
}

// elfsetupplt reserves the two-doubleword PLT header the dynamic
// linker expects at the start of .plt.
func elfsetupplt(ctxt *ld.Link, plt, got *loader.SymbolBuilder, dynamic loader.Sym) {
	if plt.Size() == 0 {
		// The dynamic linker stores the address of the
		// dynamic resolver and the DSO identifier in the two
		// doublewords at the beginning of the .plt section
		// before the PLT array. Reserve space for these.
		plt.SetSize(16)
	}
}

// machoreloc1 is unsupported on ppc64 (no Mach-O targets).
func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool {
	return false
}

// Return the value of .TOC.
// for symbol s
func symtoc(ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) int64 {
	// Use the version of the outermost containing symbol, if any,
	// to pick the matching .TOC. symbol.
	v := ldr.SymVersion(s)
	if out := ldr.OuterSym(s); out != 0 {
		v = ldr.SymVersion(out)
	}

	toc := syms.DotTOC[v]
	if toc == 0 {
		ldr.Errorf(s, "TOC-relative relocation in object without .TOC.")
		return 0
	}

	return ldr.SymValue(toc)
}

// archreloctoc relocates a TOC relative symbol.
func archreloctoc(ldr *loader.Loader, target *ld.Target, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) int64 {
	rs := r.Sym()
	// val packs the two instruction words being patched; o1/o2 are
	// unpacked in memory order for the current endianness.
	var o1, o2 uint32
	var t int64
	useAddi := false

	if target.IsBigEndian() {
		o1 = uint32(val >> 32)
		o2 = uint32(val)
	} else {
		o1 = uint32(val)
		o2 = uint32(val >> 32)
	}

	// On AIX, TOC data accesses are always made indirectly against R2 (a sequence of
	// addis+ld+load/store). If the target of the load is known, the sequence can be
	// written into addis+addi+load/store. On Linux, TOC data accesses are always made
	// directly against R2 (e.g. addis+load/store).
	if target.IsAIX() {
		if !strings.HasPrefix(ldr.SymName(rs), "TOC.") {
			ldr.Errorf(s, "archreloctoc called for a symbol without TOC anchor")
		}
		relocs := ldr.Relocs(rs)
		tarSym := relocs.At(0).Sym()

		if target.IsInternal() && tarSym != 0 && ldr.AttrReachable(tarSym) && ldr.SymSect(tarSym).Seg == &ld.Segdata {
			t = ldr.SymValue(tarSym) + r.Add() - ldr.SymValue(syms.TOC)
			// change ld to addi in the second instruction
			o2 = (o2 & 0x03FF0000) | 0xE<<26
			useAddi = true
		} else {
			t = ldr.SymValue(rs) + r.Add() - ldr.SymValue(syms.TOC)
		}
	} else {
		t = ldr.SymValue(rs) + r.Add() - symtoc(ldr, syms, s)
	}

	if t != int64(int32(t)) {
		ldr.Errorf(s, "TOC relocation for %s is too big to relocate %s: 0x%x", ldr.SymName(s), rs, t)
	}

	// Round up into the HA half so the signed LO half reconstructs t.
	if t&0x8000 != 0 {
		t += 0x10000
	}

	o1 |= uint32((t >> 16) & 0xFFFF)

	switch r.Type() {
	case objabi.R_ADDRPOWER_TOCREL_DS:
		if useAddi {
			o2 |= uint32(t) & 0xFFFF
		} else {
			// DS-form offsets must be 4-byte aligned (low two bits zero).
			if t&3 != 0 {
				ldr.Errorf(s, "bad DS reloc for %s: %d", ldr.SymName(s), ldr.SymValue(rs))
			}
			o2 |= uint32(t) & 0xFFFC
		}
	case objabi.R_ADDRPOWER_TOCREL:
		o2 |= uint32(t) & 0xffff
	default:
		return -1
	}

	if target.IsBigEndian() {
		return int64(o1)<<32 | int64(o2)
	}
	return int64(o2)<<32 | int64(o1)
}

// archrelocaddr relocates a symbol address.
// This code is for linux only.
func archrelocaddr(ldr *loader.Loader, target *ld.Target, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) int64 {
	rs := r.Sym()
	if target.IsAIX() {
		ldr.Errorf(s, "archrelocaddr called for %s relocation\n", ldr.SymName(rs))
	}
	o1, o2 := unpackInstPair(target, val)

	// Verify resulting address fits within a 31 bit (2GB) address space.
	// This is a restriction arising from the usage of lis (HA) + d-form
	// (LO) instruction sequences used to implement absolute relocations
	// on PPC64 prior to ISA 3.1 (P10). For consistency, maintain this
	// restriction for ISA 3.1 unless it becomes problematic.
	t := ldr.SymAddr(rs) + r.Add()
	if t < 0 || t >= 1<<31 {
		ldr.Errorf(s, "relocation for %s is too big (>=2G): 0x%x", ldr.SymName(s), ldr.SymValue(rs))
	}

	switch r.Type() {
	case objabi.R_ADDRPOWER_PCREL34:
		// S + A - P
		t -= (ldr.SymValue(s) + int64(r.Off()))
		o1 |= computePrefix34HI(t)
		o2 |= computeLO(int32(t))
	case objabi.R_ADDRPOWER_D34:
		o1 |= computePrefix34HI(t)
		o2 |= computeLO(int32(t))
	case objabi.R_ADDRPOWER:
		o1 |= computeHA(int32(t))
		o2 |= computeLO(int32(t))
	case objabi.R_ADDRPOWER_DS:
		o1 |= computeHA(int32(t))
		o2 |= computeLO(int32(t))
		// DS-form offsets must be 4-byte aligned (low two bits zero).
		if t&3 != 0 {
			ldr.Errorf(s, "bad DS reloc for %s: %d", ldr.SymName(s), ldr.SymValue(rs))
		}
	default:
		return -1
	}

	return packInstPair(target, o1, o2)
}

// Determine if the code was compiled so that the TOC register R2 is initialized and maintained.
func r2Valid(ctxt *ld.Link) bool {
	switch ctxt.BuildMode {
	case ld.BuildModeCArchive, ld.BuildModeCShared, ld.BuildModePIE, ld.BuildModeShared, ld.BuildModePlugin:
		return true
	}
	// -linkshared option
	return ctxt.IsSharedGoLink()
}

// resolve direct jump relocation r in s, and add trampoline if necessary.
func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) {

	// Trampolines are created if the branch offset is too large and the linker cannot insert a call stub to handle it.
	// For internal linking, trampolines are always created for long calls.
	// For external linking, the linker can insert a call stub to handle a long call, but depends on having the TOC address in
	// r2.
For those build modes with external linking where the TOC address is not maintained in r2, trampolines must be created. 953 if ctxt.IsExternal() && r2Valid(ctxt) { 954 // The TOC pointer is valid. The external linker will insert trampolines. 955 return 956 } 957 958 relocs := ldr.Relocs(s) 959 r := relocs.At(ri) 960 var t int64 961 // ldr.SymValue(rs) == 0 indicates a cross-package jump to a function that is not yet 962 // laid out. Conservatively use a trampoline. This should be rare, as we lay out packages 963 // in dependency order. 964 if ldr.SymValue(rs) != 0 { 965 t = ldr.SymValue(rs) + r.Add() - (ldr.SymValue(s) + int64(r.Off())) 966 } 967 switch r.Type() { 968 case objabi.R_CALLPOWER: 969 970 // If branch offset is too far then create a trampoline. 971 972 if (ctxt.IsExternal() && ldr.SymSect(s) != ldr.SymSect(rs)) || (ctxt.IsInternal() && int64(int32(t<<6)>>6) != t) || ldr.SymValue(rs) == 0 || (*ld.FlagDebugTramp > 1 && ldr.SymPkg(s) != ldr.SymPkg(rs)) { 973 var tramp loader.Sym 974 for i := 0; ; i++ { 975 976 // Using r.Add as part of the name is significant in functions like duffzero where the call 977 // target is at some offset within the function. Calls to duff+8 and duff+256 must appear as 978 // distinct trampolines. 979 980 oName := ldr.SymName(rs) 981 name := oName 982 if r.Add() == 0 { 983 name += fmt.Sprintf("-tramp%d", i) 984 } else { 985 name += fmt.Sprintf("%+x-tramp%d", r.Add(), i) 986 } 987 988 // Look up the trampoline in case it already exists 989 990 tramp = ldr.LookupOrCreateSym(name, int(ldr.SymVersion(rs))) 991 if oName == "runtime.deferreturn" { 992 ldr.SetIsDeferReturnTramp(tramp, true) 993 } 994 if ldr.SymValue(tramp) == 0 { 995 break 996 } 997 // Note, the trampoline is always called directly. The addend of the original relocation is accounted for in the 998 // trampoline itself. 999 t = ldr.SymValue(tramp) - (ldr.SymValue(s) + int64(r.Off())) 1000 1001 // With internal linking, the trampoline can be used if it is not too far. 
1002 // With external linking, the trampoline must be in this section for it to be reused. 1003 if (ctxt.IsInternal() && int64(int32(t<<6)>>6) == t) || (ctxt.IsExternal() && ldr.SymSect(s) == ldr.SymSect(tramp)) { 1004 break 1005 } 1006 } 1007 if ldr.SymType(tramp) == 0 { 1008 trampb := ldr.MakeSymbolUpdater(tramp) 1009 ctxt.AddTramp(trampb) 1010 gentramp(ctxt, ldr, trampb, rs, r.Add()) 1011 } 1012 sb := ldr.MakeSymbolUpdater(s) 1013 relocs := sb.Relocs() 1014 r := relocs.At(ri) 1015 r.SetSym(tramp) 1016 r.SetAdd(0) // This was folded into the trampoline target address 1017 } 1018 default: 1019 ctxt.Errorf(s, "trampoline called with non-jump reloc: %d (%s)", r.Type(), sym.RelocName(ctxt.Arch, r.Type())) 1020 } 1021 } 1022 1023 func gentramp(ctxt *ld.Link, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) { 1024 tramp.SetSize(16) // 4 instructions 1025 P := make([]byte, tramp.Size()) 1026 var o1, o2 uint32 1027 1028 if ctxt.IsAIX() { 1029 // On AIX, the address is retrieved with a TOC symbol. 1030 // For internal linking, the "Linux" way might still be used. 1031 // However, all text symbols are accessed with a TOC symbol as 1032 // text relocations aren't supposed to be possible. 1033 // So, keep using the external linking way to be more AIX friendly. 1034 o1 = uint32(0x3c000000) | 12<<21 | 2<<16 // addis r12, r2, toctargetaddr hi 1035 o2 = uint32(0xe8000000) | 12<<21 | 12<<16 // ld r12, r12, toctargetaddr lo 1036 1037 toctramp := ldr.CreateSymForUpdate("TOC."+ldr.SymName(tramp.Sym()), 0) 1038 toctramp.SetType(sym.SXCOFFTOC) 1039 toctramp.AddAddrPlus(ctxt.Arch, target, offset) 1040 1041 r, _ := tramp.AddRel(objabi.R_ADDRPOWER_TOCREL_DS) 1042 r.SetOff(0) 1043 r.SetSiz(8) // generates 2 relocations: HA + LO 1044 r.SetSym(toctramp.Sym()) 1045 } else { 1046 // Used for default build mode for an executable 1047 // Address of the call target is generated using 1048 // relocation and doesn't depend on r2 (TOC). 
1049 o1 = uint32(0x3c000000) | 12<<21 // lis r12,targetaddr hi 1050 o2 = uint32(0x38000000) | 12<<21 | 12<<16 // addi r12,r12,targetaddr lo 1051 1052 // ELFv2 save/restore functions use R0/R12 in special ways, therefore trampolines 1053 // as generated here will not always work correctly. 1054 if strings.HasPrefix(ldr.SymName(target), "runtime.elf_") { 1055 log.Fatalf("Internal linker does not support trampolines to ELFv2 ABI"+ 1056 " register save/restore function %s", ldr.SymName(target)) 1057 } 1058 1059 t := ldr.SymValue(target) 1060 if t == 0 || r2Valid(ctxt) || ctxt.IsExternal() { 1061 // Target address is unknown, generate relocations 1062 r, _ := tramp.AddRel(objabi.R_ADDRPOWER) 1063 if r2Valid(ctxt) { 1064 // Use a TOC relative address if R2 holds the TOC pointer 1065 o1 |= uint32(2 << 16) // Transform lis r31,ha into addis r31,r2,ha 1066 r.SetType(objabi.R_ADDRPOWER_TOCREL) 1067 } 1068 r.SetOff(0) 1069 r.SetSiz(8) // generates 2 relocations: HA + LO 1070 r.SetSym(target) 1071 r.SetAdd(offset) 1072 } else { 1073 // The target address is known, resolve it 1074 t += offset 1075 o1 |= (uint32(t) + 0x8000) >> 16 // HA 1076 o2 |= uint32(t) & 0xFFFF // LO 1077 } 1078 } 1079 1080 o3 := uint32(0x7c0903a6) | 12<<21 // mtctr r12 1081 o4 := uint32(0x4e800420) // bctr 1082 ctxt.Arch.ByteOrder.PutUint32(P, o1) 1083 ctxt.Arch.ByteOrder.PutUint32(P[4:], o2) 1084 ctxt.Arch.ByteOrder.PutUint32(P[8:], o3) 1085 ctxt.Arch.ByteOrder.PutUint32(P[12:], o4) 1086 tramp.SetData(P) 1087 } 1088 1089 // Unpack a pair of 32 bit instruction words from 1090 // a 64 bit relocation into instN and instN+1 in endian order. 1091 func unpackInstPair(target *ld.Target, r int64) (uint32, uint32) { 1092 if target.IsBigEndian() { 1093 return uint32(r >> 32), uint32(r) 1094 } 1095 return uint32(r), uint32(r >> 32) 1096 } 1097 1098 // Pack a pair of 32 bit instruction words o1, o2 into 64 bit relocation 1099 // in endian order. 
1100 func packInstPair(target *ld.Target, o1, o2 uint32) int64 { 1101 if target.IsBigEndian() { 1102 return (int64(o1) << 32) | int64(o2) 1103 } 1104 return int64(o1) | (int64(o2) << 32) 1105 } 1106 1107 // Compute the high-adjusted value (always a signed 32b value) per the ELF ABI. 1108 // The returned value is always 0 <= x <= 0xFFFF. 1109 func computeHA(val int32) uint32 { 1110 return uint32(uint16((val + 0x8000) >> 16)) 1111 } 1112 1113 // Compute the low value (the lower 16 bits of any 32b value) per the ELF ABI. 1114 // The returned value is always 0 <= x <= 0xFFFF. 1115 func computeLO(val int32) uint32 { 1116 return uint32(uint16(val)) 1117 } 1118 1119 // Compute the high 18 bits of a signed 34b constant. Used to pack the high 18 bits 1120 // of a prefix34 relocation field. This assumes the input is already restricted to 1121 // 34 bits. 1122 func computePrefix34HI(val int64) uint32 { 1123 return uint32((val >> 16) & 0x3FFFF) 1124 } 1125 1126 func computeTLSLEReloc(target *ld.Target, ldr *loader.Loader, rs, s loader.Sym) int64 { 1127 // The thread pointer points 0x7000 bytes after the start of the 1128 // thread local storage area as documented in section "3.7.2 TLS 1129 // Runtime Handling" of "Power Architecture 64-Bit ELF V2 ABI 1130 // Specification". 1131 v := ldr.SymValue(rs) - 0x7000 1132 if target.IsAIX() { 1133 // On AIX, the thread pointer points 0x7800 bytes after 1134 // the TLS. 1135 v -= 0x800 1136 } 1137 1138 if int64(int32(v)) != v { 1139 ldr.Errorf(s, "TLS offset out of range %d", v) 1140 } 1141 return v 1142 } 1143 1144 func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (relocatedOffset int64, nExtReloc int, ok bool) { 1145 rs := r.Sym() 1146 if target.IsExternal() { 1147 // On AIX, relocations (except TLS ones) must be also done to the 1148 // value with the current addresses. 
1149 switch rt := r.Type(); rt { 1150 default: 1151 if !target.IsAIX() { 1152 return val, nExtReloc, false 1153 } 1154 case objabi.R_POWER_TLS, objabi.R_POWER_TLS_IE_PCREL34, objabi.R_POWER_TLS_LE_TPREL34: 1155 nExtReloc = 1 1156 return val, nExtReloc, true 1157 case objabi.R_POWER_TLS_LE, objabi.R_POWER_TLS_IE: 1158 if target.IsAIX() && rt == objabi.R_POWER_TLS_LE { 1159 // Fixup val, an addis/addi pair of instructions, which generate a 32b displacement 1160 // from the threadpointer (R13), into a 16b relocation. XCOFF only supports 16b 1161 // TLS LE relocations. Likewise, verify this is an addis/addi sequence. 1162 const expectedOpcodes = 0x3C00000038000000 1163 const expectedOpmasks = 0xFC000000FC000000 1164 if uint64(val)&expectedOpmasks != expectedOpcodes { 1165 ldr.Errorf(s, "relocation for %s+%d is not an addis/addi pair: %16x", ldr.SymName(rs), r.Off(), uint64(val)) 1166 } 1167 nval := (int64(uint32(0x380d0000)) | val&0x03e00000) << 32 // addi rX, r13, $0 1168 nval |= int64(0x60000000) // nop 1169 val = nval 1170 nExtReloc = 1 1171 } else { 1172 nExtReloc = 2 1173 } 1174 return val, nExtReloc, true 1175 case objabi.R_ADDRPOWER, 1176 objabi.R_ADDRPOWER_DS, 1177 objabi.R_ADDRPOWER_TOCREL, 1178 objabi.R_ADDRPOWER_TOCREL_DS, 1179 objabi.R_ADDRPOWER_GOT, 1180 objabi.R_ADDRPOWER_PCREL: 1181 nExtReloc = 2 // need two ELF relocations, see elfreloc1 1182 if !target.IsAIX() { 1183 return val, nExtReloc, true 1184 } 1185 case objabi.R_CALLPOWER, objabi.R_ADDRPOWER_D34, objabi.R_ADDRPOWER_PCREL34: 1186 nExtReloc = 1 1187 if !target.IsAIX() { 1188 return val, nExtReloc, true 1189 } 1190 } 1191 } 1192 1193 switch r.Type() { 1194 case objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS: 1195 return archreloctoc(ldr, target, syms, r, s, val), nExtReloc, true 1196 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS, objabi.R_ADDRPOWER_D34, objabi.R_ADDRPOWER_PCREL34: 1197 return archrelocaddr(ldr, target, syms, r, s, val), nExtReloc, true 1198 case objabi.R_CALLPOWER: 1199 
// Bits 6 through 29 = (S + A - P) >> 2 1200 1201 t := ldr.SymValue(rs) + r.Add() - (ldr.SymValue(s) + int64(r.Off())) 1202 1203 tgtName := ldr.SymName(rs) 1204 1205 // If we are linking PIE or shared code, all golang generated object files have an extra 2 instruction prologue 1206 // to regenerate the TOC pointer from R12. The exception are two special case functions tested below. Note, 1207 // local call offsets for externally generated objects are accounted for when converting into golang relocs. 1208 if !ldr.AttrExternal(rs) && ldr.AttrShared(rs) && tgtName != "runtime.duffzero" && tgtName != "runtime.duffcopy" { 1209 // Furthermore, only apply the offset if the target looks like the start of a function call. 1210 if r.Add() == 0 && ldr.SymType(rs) == sym.STEXT { 1211 t += 8 1212 } 1213 } 1214 1215 if t&3 != 0 { 1216 ldr.Errorf(s, "relocation for %s+%d is not aligned: %d", ldr.SymName(rs), r.Off(), t) 1217 } 1218 // If branch offset is too far then create a trampoline. 1219 1220 if int64(int32(t<<6)>>6) != t { 1221 ldr.Errorf(s, "direct call too far: %s %x", ldr.SymName(rs), t) 1222 } 1223 return val | int64(uint32(t)&^0xfc000003), nExtReloc, true 1224 case objabi.R_POWER_TOC: // S + A - .TOC. 1225 return ldr.SymValue(rs) + r.Add() - symtoc(ldr, syms, s), nExtReloc, true 1226 1227 case objabi.R_ADDRPOWER_PCREL: // S + A - P 1228 t := ldr.SymValue(rs) + r.Add() - (ldr.SymValue(s) + int64(r.Off())) 1229 ha, l := unpackInstPair(target, val) 1230 l |= computeLO(int32(t)) 1231 ha |= computeHA(int32(t)) 1232 return packInstPair(target, ha, l), nExtReloc, true 1233 1234 case objabi.R_POWER_TLS: 1235 const OP_ADD = 31<<26 | 266<<1 1236 const MASK_OP_ADD = 0x3F<<26 | 0x1FF<<1 1237 if val&MASK_OP_ADD != OP_ADD { 1238 ldr.Errorf(s, "R_POWER_TLS reloc only supports XO form ADD, not %08X", val) 1239 } 1240 // Verify RB is R13 in ADD RA,RB,RT. 1241 if (val>>11)&0x1F != 13 { 1242 // If external linking is made to support this, it may expect the linker to rewrite RB. 
1243 ldr.Errorf(s, "R_POWER_TLS reloc requires R13 in RB (%08X).", uint32(val)) 1244 } 1245 return val, nExtReloc, true 1246 1247 case objabi.R_POWER_TLS_IE: 1248 // Convert TLS_IE relocation to TLS_LE if supported. 1249 if !(target.IsPIE() && target.IsElf()) { 1250 log.Fatalf("cannot handle R_POWER_TLS_IE (sym %s) when linking non-PIE, non-ELF binaries internally", ldr.SymName(s)) 1251 } 1252 1253 // We are an ELF binary, we can safely convert to TLS_LE from: 1254 // addis to, r2, x@got@tprel@ha 1255 // ld to, to, x@got@tprel@l(to) 1256 // 1257 // to TLS_LE by converting to: 1258 // addis to, r0, x@tprel@ha 1259 // addi to, to, x@tprel@l(to) 1260 1261 const OP_ADDI = 14 << 26 1262 const OP_MASK = 0x3F << 26 1263 const OP_RA_MASK = 0x1F << 16 1264 // convert r2 to r0, and ld to addi 1265 mask := packInstPair(target, OP_RA_MASK, OP_MASK) 1266 addi_op := packInstPair(target, 0, OP_ADDI) 1267 val &^= mask 1268 val |= addi_op 1269 fallthrough 1270 1271 case objabi.R_POWER_TLS_LE: 1272 v := computeTLSLEReloc(target, ldr, rs, s) 1273 o1, o2 := unpackInstPair(target, val) 1274 o1 |= computeHA(int32(v)) 1275 o2 |= computeLO(int32(v)) 1276 return packInstPair(target, o1, o2), nExtReloc, true 1277 1278 case objabi.R_POWER_TLS_IE_PCREL34: 1279 // Convert TLS_IE relocation to TLS_LE if supported. 
1280 if !(target.IsPIE() && target.IsElf()) { 1281 log.Fatalf("cannot handle R_POWER_TLS_IE (sym %s) when linking non-PIE, non-ELF binaries internally", ldr.SymName(s)) 1282 } 1283 1284 // We are an ELF binary, we can safely convert to TLS_LE_TPREL34 from: 1285 // pld rX, x@got@tprel@pcrel 1286 // 1287 // to TLS_LE_TPREL32 by converting to: 1288 // pla rX, x@tprel 1289 1290 const OP_MASK_PFX = 0xFFFFFFFF // Discard prefix word 1291 const OP_MASK = (0x3F << 26) | 0xFFFF // Preserve RT, RA 1292 const OP_PFX = 1<<26 | 2<<24 1293 const OP_PLA = 14 << 26 1294 mask := packInstPair(target, OP_MASK_PFX, OP_MASK) 1295 pla_op := packInstPair(target, OP_PFX, OP_PLA) 1296 val &^= mask 1297 val |= pla_op 1298 fallthrough 1299 1300 case objabi.R_POWER_TLS_LE_TPREL34: 1301 v := computeTLSLEReloc(target, ldr, rs, s) 1302 o1, o2 := unpackInstPair(target, val) 1303 o1 |= computePrefix34HI(v) 1304 o2 |= computeLO(int32(v)) 1305 return packInstPair(target, o1, o2), nExtReloc, true 1306 } 1307 1308 return val, nExtReloc, false 1309 } 1310 1311 func archrelocvariant(target *ld.Target, ldr *loader.Loader, r loader.Reloc, rv sym.RelocVariant, s loader.Sym, t int64, p []byte) (relocatedOffset int64) { 1312 rs := r.Sym() 1313 switch rv & sym.RV_TYPE_MASK { 1314 default: 1315 ldr.Errorf(s, "unexpected relocation variant %d", rv) 1316 fallthrough 1317 1318 case sym.RV_NONE: 1319 return t 1320 1321 case sym.RV_POWER_LO: 1322 if rv&sym.RV_CHECK_OVERFLOW != 0 { 1323 // Whether to check for signed or unsigned 1324 // overflow depends on the instruction 1325 var o1 uint32 1326 if target.IsBigEndian() { 1327 o1 = binary.BigEndian.Uint32(p[r.Off()-2:]) 1328 1329 } else { 1330 o1 = binary.LittleEndian.Uint32(p[r.Off():]) 1331 } 1332 switch o1 >> 26 { 1333 case 24, // ori 1334 26, // xori 1335 28: // andi 1336 if t>>16 != 0 { 1337 goto overflow 1338 } 1339 1340 default: 1341 if int64(int16(t)) != t { 1342 goto overflow 1343 } 1344 } 1345 } 1346 1347 return int64(int16(t)) 1348 1349 case 
sym.RV_POWER_HA: 1350 t += 0x8000 1351 fallthrough 1352 1353 // Fallthrough 1354 case sym.RV_POWER_HI: 1355 t >>= 16 1356 1357 if rv&sym.RV_CHECK_OVERFLOW != 0 { 1358 // Whether to check for signed or unsigned 1359 // overflow depends on the instruction 1360 var o1 uint32 1361 if target.IsBigEndian() { 1362 o1 = binary.BigEndian.Uint32(p[r.Off()-2:]) 1363 } else { 1364 o1 = binary.LittleEndian.Uint32(p[r.Off():]) 1365 } 1366 switch o1 >> 26 { 1367 case 25, // oris 1368 27, // xoris 1369 29: // andis 1370 if t>>16 != 0 { 1371 goto overflow 1372 } 1373 1374 default: 1375 if int64(int16(t)) != t { 1376 goto overflow 1377 } 1378 } 1379 } 1380 1381 return int64(int16(t)) 1382 1383 case sym.RV_POWER_DS: 1384 var o1 uint32 1385 if target.IsBigEndian() { 1386 o1 = uint32(binary.BigEndian.Uint16(p[r.Off():])) 1387 } else { 1388 o1 = uint32(binary.LittleEndian.Uint16(p[r.Off():])) 1389 } 1390 if t&3 != 0 { 1391 ldr.Errorf(s, "relocation for %s+%d is not aligned: %d", ldr.SymName(rs), r.Off(), t) 1392 } 1393 if (rv&sym.RV_CHECK_OVERFLOW != 0) && int64(int16(t)) != t { 1394 goto overflow 1395 } 1396 return int64(o1)&0x3 | int64(int16(t)) 1397 } 1398 1399 overflow: 1400 ldr.Errorf(s, "relocation for %s+%d is too big: %d", ldr.SymName(rs), r.Off(), t) 1401 return t 1402 } 1403 1404 func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { 1405 switch r.Type() { 1406 case objabi.R_POWER_TLS, objabi.R_POWER_TLS_LE, objabi.R_POWER_TLS_IE, objabi.R_POWER_TLS_IE_PCREL34, objabi.R_POWER_TLS_LE_TPREL34, objabi.R_CALLPOWER: 1407 return ld.ExtrelocSimple(ldr, r), true 1408 case objabi.R_ADDRPOWER, 1409 objabi.R_ADDRPOWER_DS, 1410 objabi.R_ADDRPOWER_TOCREL, 1411 objabi.R_ADDRPOWER_TOCREL_DS, 1412 objabi.R_ADDRPOWER_GOT, 1413 objabi.R_ADDRPOWER_PCREL, 1414 objabi.R_ADDRPOWER_D34, 1415 objabi.R_ADDRPOWER_PCREL34: 1416 return ld.ExtrelocViaOuterSym(ldr, r, s), true 1417 } 1418 return loader.ExtReloc{}, false 1419 } 1420 1421 func 
addpltsym(ctxt *ld.Link, ldr *loader.Loader, s loader.Sym) { 1422 if ldr.SymPlt(s) >= 0 { 1423 return 1424 } 1425 1426 ld.Adddynsym(ldr, &ctxt.Target, &ctxt.ArchSyms, s) 1427 1428 if ctxt.IsELF { 1429 plt := ldr.MakeSymbolUpdater(ctxt.PLT) 1430 rela := ldr.MakeSymbolUpdater(ctxt.RelaPLT) 1431 if plt.Size() == 0 { 1432 panic("plt is not set up") 1433 } 1434 1435 // Create the glink resolver if necessary 1436 glink := ensureglinkresolver(ctxt, ldr) 1437 1438 // Write symbol resolver stub (just a branch to the 1439 // glink resolver stub) 1440 rel, _ := glink.AddRel(objabi.R_CALLPOWER) 1441 rel.SetOff(int32(glink.Size())) 1442 rel.SetSiz(4) 1443 rel.SetSym(glink.Sym()) 1444 glink.AddUint32(ctxt.Arch, 0x48000000) // b .glink 1445 1446 // In the ppc64 ABI, the dynamic linker is responsible 1447 // for writing the entire PLT. We just need to 1448 // reserve 8 bytes for each PLT entry and generate a 1449 // JMP_SLOT dynamic relocation for it. 1450 // 1451 // TODO(austin): ABI v1 is different 1452 ldr.SetPlt(s, int32(plt.Size())) 1453 1454 plt.Grow(plt.Size() + 8) 1455 plt.SetSize(plt.Size() + 8) 1456 1457 rela.AddAddrPlus(ctxt.Arch, plt.Sym(), int64(ldr.SymPlt(s))) 1458 rela.AddUint64(ctxt.Arch, elf.R_INFO(uint32(ldr.SymDynid(s)), uint32(elf.R_PPC64_JMP_SLOT))) 1459 rela.AddUint64(ctxt.Arch, 0) 1460 } else { 1461 ctxt.Errorf(s, "addpltsym: unsupported binary format") 1462 } 1463 } 1464 1465 // Generate the glink resolver stub if necessary and return the .glink section. 1466 func ensureglinkresolver(ctxt *ld.Link, ldr *loader.Loader) *loader.SymbolBuilder { 1467 glink := ldr.CreateSymForUpdate(".glink", 0) 1468 if glink.Size() != 0 { 1469 return glink 1470 } 1471 1472 // This is essentially the resolver from the ppc64 ELFv2 ABI. 1473 // At entry, r12 holds the address of the symbol resolver stub 1474 // for the target routine and the argument registers hold the 1475 // arguments for the target routine. 
1476 // 1477 // PC-rel offsets are computed once the final codesize of the 1478 // resolver is known. 1479 // 1480 // This stub is PIC, so first get the PC of label 1 into r11. 1481 glink.AddUint32(ctxt.Arch, 0x7c0802a6) // mflr r0 1482 glink.AddUint32(ctxt.Arch, 0x429f0005) // bcl 20,31,1f 1483 glink.AddUint32(ctxt.Arch, 0x7d6802a6) // 1: mflr r11 1484 glink.AddUint32(ctxt.Arch, 0x7c0803a6) // mtlr r0 1485 1486 // Compute the .plt array index from the entry point address 1487 // into r0. This is computed relative to label 1 above. 1488 glink.AddUint32(ctxt.Arch, 0x38000000) // li r0,-(res_0-1b) 1489 glink.AddUint32(ctxt.Arch, 0x7c006214) // add r0,r0,r12 1490 glink.AddUint32(ctxt.Arch, 0x7c0b0050) // sub r0,r0,r11 1491 glink.AddUint32(ctxt.Arch, 0x7800f082) // srdi r0,r0,2 1492 1493 // Load the PC-rel offset of ".plt - 1b", and add it to 1b. 1494 // This is stored after this stub and before the resolvers. 1495 glink.AddUint32(ctxt.Arch, 0xe98b0000) // ld r12,res_0-1b-8(r11) 1496 glink.AddUint32(ctxt.Arch, 0x7d6b6214) // add r11,r11,r12 1497 1498 // Load r12 = dynamic resolver address and r11 = DSO 1499 // identifier from the first two doublewords of the PLT. 1500 glink.AddUint32(ctxt.Arch, 0xe98b0000) // ld r12,0(r11) 1501 glink.AddUint32(ctxt.Arch, 0xe96b0008) // ld r11,8(r11) 1502 1503 // Jump to the dynamic resolver 1504 glink.AddUint32(ctxt.Arch, 0x7d8903a6) // mtctr r12 1505 glink.AddUint32(ctxt.Arch, 0x4e800420) // bctr 1506 1507 // Store the PC-rel offset to the PLT 1508 r, _ := glink.AddRel(objabi.R_PCREL) 1509 r.SetSym(ctxt.PLT) 1510 r.SetSiz(8) 1511 r.SetOff(int32(glink.Size())) 1512 r.SetAdd(glink.Size()) // Adjust the offset to be relative to label 1 above. 1513 glink.AddUint64(ctxt.Arch, 0) // The offset to the PLT. 1514 1515 // Resolve PC-rel offsets above now the final size of the stub is known. 
1516 res0m1b := glink.Size() - 8 // res_0 - 1b 1517 glink.SetUint32(ctxt.Arch, 16, 0x38000000|uint32(uint16(-res0m1b))) 1518 glink.SetUint32(ctxt.Arch, 32, 0xe98b0000|uint32(uint16(res0m1b-8))) 1519 1520 // The symbol resolvers must immediately follow. 1521 // res_0: 1522 1523 // Add DT_PPC64_GLINK .dynamic entry, which points to 32 bytes 1524 // before the first symbol resolver stub. 1525 du := ldr.MakeSymbolUpdater(ctxt.Dynamic) 1526 ld.Elfwritedynentsymplus(ctxt, du, elf.DT_PPC64_GLINK, glink.Sym(), glink.Size()-32) 1527 1528 return glink 1529 }