// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"github.com/gagliardetto/golang-go/cmd/compile/internal/types"
	"github.com/gagliardetto/golang-go/cmd/internal/gcprog"
	"github.com/gagliardetto/golang-go/cmd/internal/obj"
	"github.com/gagliardetto/golang-go/cmd/internal/objabi"
	"github.com/gagliardetto/golang-go/cmd/internal/src"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
)

// itabEntry records an itab (concrete type t implementing interface
// itype) that must be emitted into the object file.
type itabEntry struct {
	t, itype *types.Type
	lsym     *obj.LSym // symbol of the itab itself

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in by peekitabs
	entries []*obj.LSym
}

// ptabEntry pairs a symbol s with its type t, collected in the
// package-level ptabs list below.
type ptabEntry struct {
	s *types.Sym
	t *types.Type
}

// runtime interface and reflection data structures
var (
	signatmu    sync.Mutex // protects signatset and signatslice
	signatset   = make(map[*types.Type]struct{})
	signatslice []*types.Type

	itabs []itabEntry
	ptabs []ptabEntry
)

// Sig describes one method for which signature metadata and wrapper
// functions are generated (see methods and imethods below).
type Sig struct {
	name  *types.Sym  // method name
	isym  *types.Sym  // method symbol for the interface-word form of the receiver type
	tsym  *types.Sym  // method symbol for the receiver type itself
	type_ *types.Type // method function type with the receiver as first argument
	mtype *types.Type // method function type without the receiver
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with runtime/map.go.
58 const ( 59 BUCKETSIZE = 8 60 MAXKEYSIZE = 128 61 MAXELEMSIZE = 128 62 ) 63 64 func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) 65 func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) 66 67 func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) 68 if t.Sym == nil && len(methods(t)) == 0 { 69 return 0 70 } 71 return 4 + 2 + 2 + 4 + 4 72 } 73 74 func makefield(name string, t *types.Type) *types.Field { 75 f := types.NewField() 76 f.Type = t 77 f.Sym = (*types.Pkg)(nil).Lookup(name) 78 return f 79 } 80 81 // bmap makes the map bucket type given the type of the map. 82 func bmap(t *types.Type) *types.Type { 83 if t.MapType().Bucket != nil { 84 return t.MapType().Bucket 85 } 86 87 bucket := types.New(TSTRUCT) 88 keytype := t.Key() 89 elemtype := t.Elem() 90 dowidth(keytype) 91 dowidth(elemtype) 92 if keytype.Width > MAXKEYSIZE { 93 keytype = types.NewPtr(keytype) 94 } 95 if elemtype.Width > MAXELEMSIZE { 96 elemtype = types.NewPtr(elemtype) 97 } 98 99 field := make([]*types.Field, 0, 5) 100 101 // The first field is: uint8 topbits[BUCKETSIZE]. 102 arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE) 103 field = append(field, makefield("topbits", arr)) 104 105 arr = types.NewArray(keytype, BUCKETSIZE) 106 arr.SetNoalg(true) 107 keys := makefield("keys", arr) 108 field = append(field, keys) 109 110 arr = types.NewArray(elemtype, BUCKETSIZE) 111 arr.SetNoalg(true) 112 elems := makefield("elems", arr) 113 field = append(field, elems) 114 115 // If keys and elems have no pointers, the map implementation 116 // can keep a list of overflow pointers on the side so that 117 // buckets can be marked as having no pointers. 118 // Arrange for the bucket to have no pointers by changing 119 // the type of the overflow field to uintptr in this case. 120 // See comment on hmap.overflow in runtime/map.go. 
121 otyp := types.NewPtr(bucket) 122 if !types.Haspointers(elemtype) && !types.Haspointers(keytype) { 123 otyp = types.Types[TUINTPTR] 124 } 125 overflow := makefield("overflow", otyp) 126 field = append(field, overflow) 127 128 // link up fields 129 bucket.SetNoalg(true) 130 bucket.SetFields(field[:]) 131 dowidth(bucket) 132 133 // Check invariants that map code depends on. 134 if !IsComparable(t.Key()) { 135 Fatalf("unsupported map key type for %v", t) 136 } 137 if BUCKETSIZE < 8 { 138 Fatalf("bucket size too small for proper alignment") 139 } 140 if keytype.Align > BUCKETSIZE { 141 Fatalf("key align too big for %v", t) 142 } 143 if elemtype.Align > BUCKETSIZE { 144 Fatalf("elem align too big for %v", t) 145 } 146 if keytype.Width > MAXKEYSIZE { 147 Fatalf("key size to large for %v", t) 148 } 149 if elemtype.Width > MAXELEMSIZE { 150 Fatalf("elem size to large for %v", t) 151 } 152 if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() { 153 Fatalf("key indirect incorrect for %v", t) 154 } 155 if t.Elem().Width > MAXELEMSIZE && !elemtype.IsPtr() { 156 Fatalf("elem indirect incorrect for %v", t) 157 } 158 if keytype.Width%int64(keytype.Align) != 0 { 159 Fatalf("key size not a multiple of key align for %v", t) 160 } 161 if elemtype.Width%int64(elemtype.Align) != 0 { 162 Fatalf("elem size not a multiple of elem align for %v", t) 163 } 164 if bucket.Align%keytype.Align != 0 { 165 Fatalf("bucket align not multiple of key align %v", t) 166 } 167 if bucket.Align%elemtype.Align != 0 { 168 Fatalf("bucket align not multiple of elem align %v", t) 169 } 170 if keys.Offset%int64(keytype.Align) != 0 { 171 Fatalf("bad alignment of keys in bmap for %v", t) 172 } 173 if elems.Offset%int64(elemtype.Align) != 0 { 174 Fatalf("bad alignment of elems in bmap for %v", t) 175 } 176 177 // Double-check that overflow field is final memory in struct, 178 // with no padding at end. 
179 if overflow.Offset != bucket.Width-int64(Widthptr) { 180 Fatalf("bad offset of overflow in bmap for %v", t) 181 } 182 183 t.MapType().Bucket = bucket 184 185 bucket.StructType().Map = t 186 return bucket 187 } 188 189 // hmap builds a type representing a Hmap structure for the given map type. 190 // Make sure this stays in sync with runtime/map.go. 191 func hmap(t *types.Type) *types.Type { 192 if t.MapType().Hmap != nil { 193 return t.MapType().Hmap 194 } 195 196 bmap := bmap(t) 197 198 // build a struct: 199 // type hmap struct { 200 // count int 201 // flags uint8 202 // B uint8 203 // noverflow uint16 204 // hash0 uint32 205 // buckets *bmap 206 // oldbuckets *bmap 207 // nevacuate uintptr 208 // extra unsafe.Pointer // *mapextra 209 // } 210 // must match runtime/map.go:hmap. 211 fields := []*types.Field{ 212 makefield("count", types.Types[TINT]), 213 makefield("flags", types.Types[TUINT8]), 214 makefield("B", types.Types[TUINT8]), 215 makefield("noverflow", types.Types[TUINT16]), 216 makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP. 217 makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP. 218 makefield("oldbuckets", types.NewPtr(bmap)), 219 makefield("nevacuate", types.Types[TUINTPTR]), 220 makefield("extra", types.Types[TUNSAFEPTR]), 221 } 222 223 hmap := types.New(TSTRUCT) 224 hmap.SetNoalg(true) 225 hmap.SetFields(fields) 226 dowidth(hmap) 227 228 // The size of hmap should be 48 bytes on 64 bit 229 // and 28 bytes on 32 bit platforms. 230 if size := int64(8 + 5*Widthptr); hmap.Width != size { 231 Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size) 232 } 233 234 t.MapType().Hmap = hmap 235 hmap.StructType().Map = t 236 return hmap 237 } 238 239 // hiter builds a type representing an Hiter structure for the given map type. 240 // Make sure this stays in sync with runtime/map.go. 
241 func hiter(t *types.Type) *types.Type { 242 if t.MapType().Hiter != nil { 243 return t.MapType().Hiter 244 } 245 246 hmap := hmap(t) 247 bmap := bmap(t) 248 249 // build a struct: 250 // type hiter struct { 251 // key *Key 252 // elem *Elem 253 // t unsafe.Pointer // *MapType 254 // h *hmap 255 // buckets *bmap 256 // bptr *bmap 257 // overflow unsafe.Pointer // *[]*bmap 258 // oldoverflow unsafe.Pointer // *[]*bmap 259 // startBucket uintptr 260 // offset uint8 261 // wrapped bool 262 // B uint8 263 // i uint8 264 // bucket uintptr 265 // checkBucket uintptr 266 // } 267 // must match runtime/map.go:hiter. 268 fields := []*types.Field{ 269 makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP. 270 makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP. 271 makefield("t", types.Types[TUNSAFEPTR]), 272 makefield("h", types.NewPtr(hmap)), 273 makefield("buckets", types.NewPtr(bmap)), 274 makefield("bptr", types.NewPtr(bmap)), 275 makefield("overflow", types.Types[TUNSAFEPTR]), 276 makefield("oldoverflow", types.Types[TUNSAFEPTR]), 277 makefield("startBucket", types.Types[TUINTPTR]), 278 makefield("offset", types.Types[TUINT8]), 279 makefield("wrapped", types.Types[TBOOL]), 280 makefield("B", types.Types[TUINT8]), 281 makefield("i", types.Types[TUINT8]), 282 makefield("bucket", types.Types[TUINTPTR]), 283 makefield("checkBucket", types.Types[TUINTPTR]), 284 } 285 286 // build iterator struct holding the above fields 287 hiter := types.New(TSTRUCT) 288 hiter.SetNoalg(true) 289 hiter.SetFields(fields) 290 dowidth(hiter) 291 if hiter.Width != int64(12*Widthptr) { 292 Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) 293 } 294 t.MapType().Hiter = hiter 295 hiter.StructType().Map = t 296 return hiter 297 } 298 299 // deferstruct makes a runtime._defer structure, with additional space for 300 // stksize bytes of args. 
301 func deferstruct(stksize int64) *types.Type { 302 makefield := func(name string, typ *types.Type) *types.Field { 303 f := types.NewField() 304 f.Type = typ 305 // Unlike the global makefield function, this one needs to set Pkg 306 // because these types might be compared (in SSA CSE sorting). 307 // TODO: unify this makefield and the global one above. 308 f.Sym = &types.Sym{Name: name, Pkg: localpkg} 309 return f 310 } 311 argtype := types.NewArray(types.Types[TUINT8], stksize) 312 argtype.Width = stksize 313 argtype.Align = 1 314 // These fields must match the ones in runtime/runtime2.go:_defer and 315 // cmd/compile/internal/gc/ssa.go:(*state).call. 316 fields := []*types.Field{ 317 makefield("siz", types.Types[TUINT32]), 318 makefield("started", types.Types[TBOOL]), 319 makefield("heap", types.Types[TBOOL]), 320 makefield("openDefer", types.Types[TBOOL]), 321 makefield("sp", types.Types[TUINTPTR]), 322 makefield("pc", types.Types[TUINTPTR]), 323 // Note: the types here don't really matter. Defer structures 324 // are always scanned explicitly during stack copying and GC, 325 // so we make them uintptr type even though they are real pointers. 326 makefield("fn", types.Types[TUINTPTR]), 327 makefield("_panic", types.Types[TUINTPTR]), 328 makefield("link", types.Types[TUINTPTR]), 329 makefield("framepc", types.Types[TUINTPTR]), 330 makefield("varp", types.Types[TUINTPTR]), 331 makefield("fd", types.Types[TUINTPTR]), 332 makefield("args", argtype), 333 } 334 335 // build struct holding the above fields 336 s := types.New(TSTRUCT) 337 s.SetNoalg(true) 338 s.SetFields(fields) 339 s.Width = widstruct(s, s, 0, 1) 340 s.Align = uint8(Widthptr) 341 return s 342 } 343 344 // f is method type, with receiver. 345 // return function type, receiver as first argument (or not). 
func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
	inLen := f.Params().Fields().Len()
	if receiver != nil {
		inLen++
	}
	in := make([]*Node, 0, inLen)

	// Receiver (if any) becomes the first ordinary parameter.
	if receiver != nil {
		d := anonfield(receiver)
		in = append(in, d)
	}

	for _, t := range f.Params().Fields().Slice() {
		d := anonfield(t.Type)
		d.SetIsDDD(t.IsDDD())
		in = append(in, d)
	}

	outLen := f.Results().Fields().Len()
	out := make([]*Node, 0, outLen)
	for _, t := range f.Results().Fields().Slice() {
		d := anonfield(t.Type)
		out = append(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname() != nil {
		// Link to name of original method function.
		t.SetNname(f.Nname())
	}

	return t
}

// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *types.Type) []*Sig {
	// method type
	mt := methtype(t)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = types.NewPtr(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var ms []*Sig
	for _, f := range mt.AllMethods().Slice() {
		if !f.IsMethod() {
			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Type.Recv() == nil {
			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Nointerface() {
			continue
		}

		method := f.Sym
		if method == nil {
			break
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		if !isMethodApplicable(t, f) {
			continue
		}

		sig := &Sig{
			name:  method,
			isym:  methodSym(it, method),
			tsym:  methodSym(t, method),
			type_: methodfunc(f.Type, t),
			mtype: methodfunc(f.Type, nil),
		}
		ms = append(ms, sig)

		this := f.Type.Recv().Type

		// Generate wrapper functions (once per symbol) when the
		// declared receiver type differs from the form needed.
		if !sig.isym.Siggen() {
			sig.isym.SetSiggen(true)
			if !types.Identical(this, it) {
				genwrapper(it, f, sig.isym)
			}
		}

		if !sig.tsym.Siggen() {
			sig.tsym.SetSiggen(true)
			if !types.Identical(this, t) {
				genwrapper(t, f, sig.tsym)
			}
		}
	}

	return ms
}

// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *types.Type) []*Sig {
	var methods []*Sig
	for _, f := range t.Fields().Slice() {
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		if f.Sym.IsBlank() {
			Fatalf("unexpected blank symbol in interface method set")
		}
		// Fields are expected to arrive already sorted by name.
		if n := len(methods); n > 0 {
			last := methods[n-1]
			if !last.name.Less(f.Sym) {
				Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
			}
		}

		sig := &Sig{
			name:  f.Sym,
			mtype: f.Type,
			type_: methodfunc(f.Type, nil),
		}
		methods = append(methods, sig)

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym := methodSym(t, f.Sym)
		if !isym.Siggen() {
			isym.SetSiggen(true)
			genwrapper(t, f, isym)
		}
	}

	return methods
}

// dimportpath emits, once per package, the symbol holding p's import
// path string, recording it in p.Pathsym.
func dimportpath(p *types.Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	str := p.Path
	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		str = myimportpath
	}

	s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
	ot := dnameData(s, 0, str, "", nil, false)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	p.Pathsym = s
}

// dgopkgpath writes a pointer to pkg's path symbol into s at offset ot,
// returning the new offset.
func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duintptr(s, ot, 0)
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptr(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptr(s, ot, pkg.Pathsym, 0)
}

// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duint32(s, ot, 0)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptrOff(s, ot, ns)
	}

	dimportpath(pkg)
	return dsymptrOff(s, ot, pkg.Pathsym)
}

// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
	if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
		Fatalf("package mismatch for %v", ft.Sym)
	}
	nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
	return dsymptr(lsym, ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
	// Lengths are stored in 16 bits below, so enforce the limit here.
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	if pkg != nil {
		ot = dgopkgpathOff(s, ot, pkg)
	}

	return ot
}

// dnameCount numbers the otherwise-ambiguous symbols created for
// package-qualified names (see dname below).
var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			if exported {
				sname += name + "." + tag
			} else {
				sname += name + "-" + tag
			}
		}
	} else {
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := Ctxt.Lookup(sname)
	// Already written (shared with another use)?
	if len(s.P) > 0 {
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
	m := methods(t)
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %v", t)
	}

	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOff(lsym, ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	// Counts and offsets below are stored in 16/32 bits; check they fit.
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	}
	// Exported methods sort first, so xcount is the count of exported methods.
	xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)
	}

	ot = duint16(lsym, ot, uint16(mcount))
	ot = duint16(lsym, ot, uint16(xcount))
	ot = duint32(lsym, ot, uint32(dataAdd))
	ot = duint32(lsym, ot, 0)
	return ot
}

// typePkg returns the package in which t (or, for unnamed
// array/slice/pointer/channel types, its element type) was declared,
// or nil for predeclared types.
func typePkg(t *types.Type) *types.Pkg {
	tsym := t.Sym
	if tsym == nil {
		switch t.Etype {
		case TARRAY, TSLICE, TPTR, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := types.IsExported(a.name.Name)
		var pkg *types.Pkg
		if !exported && a.name.Pkg != typePkg(t) {
			pkg = a.name.Pkg
		}
		nsym := dname(a.name.Name, "", pkg, exported)

		ot = dsymptrOff(lsym, ot, nsym)
		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
	}
	return ot
}

// dmethodptrOff writes a 4-byte R_METHODOFF relocation referencing x
// into s at offset ot, returning the new offset.
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
	duint32(s, ot, 0)
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = objabi.R_METHODOFF
	return ot + 4
}

// kinds maps compiler type kinds (Etype) to runtime/reflect kind constants.
var kinds = []int{
	TINT:        objabi.KindInt,
	TUINT:       objabi.KindUint,
	TINT8:       objabi.KindInt8,
	TUINT8:      objabi.KindUint8,
	TINT16:      objabi.KindInt16,
	TUINT16:     objabi.KindUint16,
	TINT32:      objabi.KindInt32,
	TUINT32:     objabi.KindUint32,
	TINT64:      objabi.KindInt64,
	TUINT64:     objabi.KindUint64,
	TUINTPTR:    objabi.KindUintptr,
	TFLOAT32:    objabi.KindFloat32,
	TFLOAT64:    objabi.KindFloat64,
	TBOOL:       objabi.KindBool,
	TSTRING:     objabi.KindString,
	TPTR:        objabi.KindPtr,
	TSTRUCT:     objabi.KindStruct,
	TINTER:      objabi.KindInterface,
	TCHAN:       objabi.KindChan,
	TMAP:        objabi.KindMap,
	TARRAY:      objabi.KindArray,
	TSLICE:      objabi.KindSlice,
	TFUNC:       objabi.KindFunc,
	TCOMPLEX64:  objabi.KindComplex64,
	TCOMPLEX128: objabi.KindComplex128,
	TUNSAFEPTR:  objabi.KindUnsafePointer,
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
756 func typeptrdata(t *types.Type) int64 { 757 if !types.Haspointers(t) { 758 return 0 759 } 760 761 switch t.Etype { 762 case TPTR, 763 TUNSAFEPTR, 764 TFUNC, 765 TCHAN, 766 TMAP: 767 return int64(Widthptr) 768 769 case TSTRING: 770 // struct { byte *str; intgo len; } 771 return int64(Widthptr) 772 773 case TINTER: 774 // struct { Itab *tab; void *data; } or 775 // struct { Type *type; void *data; } 776 // Note: see comment in plive.go:onebitwalktype1. 777 return 2 * int64(Widthptr) 778 779 case TSLICE: 780 // struct { byte *array; uintgo len; uintgo cap; } 781 return int64(Widthptr) 782 783 case TARRAY: 784 // haspointers already eliminated t.NumElem() == 0. 785 return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem()) 786 787 case TSTRUCT: 788 // Find the last field that has pointers. 789 var lastPtrField *types.Field 790 for _, t1 := range t.Fields().Slice() { 791 if types.Haspointers(t1.Type) { 792 lastPtrField = t1 793 } 794 } 795 return lastPtrField.Offset + typeptrdata(lastPtrField.Type) 796 797 default: 798 Fatalf("typeptrdata: unexpected type, %v", t) 799 return 0 800 } 801 } 802 803 // tflag is documented in reflect/type.go. 804 // 805 // tflag values must be kept in sync with copies in: 806 // cmd/compile/internal/gc/reflect.go 807 // cmd/link/internal/ld/decodesym.go 808 // reflect/type.go 809 // runtime/type.go 810 const ( 811 tflagUncommon = 1 << 0 812 tflagExtraStar = 1 << 1 813 tflagNamed = 1 << 2 814 tflagRegularMemory = 1 << 3 815 ) 816 817 var ( 818 memhashvarlen *obj.LSym 819 memequalvarlen *obj.LSym 820 ) 821 822 // dcommontype dumps the contents of a reflect.rtype (runtime._type). 
func dcommontype(lsym *obj.LSym, t *types.Type) int {
	dowidth(t)
	eqfunc := geneq(t)

	// Emit the *T descriptor alongside T unless t is itself an
	// unnamed pointer type; a weak reference is enough unless *T is
	// named or has methods.
	sptrWeak := true
	var sptr *obj.LSym
	if !t.IsPtr() || t.IsPtrElem() {
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
			sptrWeak = false
		}
		sptr = dtypesym(tptr)
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	// type rtype struct {
	//    size       uintptr
	//    ptrdata    uintptr
	//    hash       uint32
	//    tflag      tflag
	//    align      uint8
	//    fieldAlign uint8
	//    kind       uint8
	//    equal      func(unsafe.Pointer, unsafe.Pointer) bool
	//    gcdata     *byte
	//    str        nameOff
	//    ptrToThis  typeOff
	// }
	ot := 0
	ot = duintptr(lsym, ot, uint64(t.Width))
	ot = duintptr(lsym, ot, uint64(ptrdata))
	ot = duint32(lsym, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}
	if IsRegularMemory(t) {
		tflag |= tflagRegularMemory
	}

	exported := false
	p := t.LongString()
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = types.IsExported(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = types.IsExported(t.Elem().Sym.Name)
		}
	}

	ot = duint8(lsym, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(lsym, ot, t.Align) // align
	ot = duint8(lsym, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if isdirectiface(t) {
		i |= objabi.KindDirectIface
	}
	if useGCProg {
		i |= objabi.KindGCProg
	}
	ot = duint8(lsym, ot, uint8(i)) // kind
	if eqfunc != nil {
		ot = dsymptr(lsym, ot, eqfunc, 0) // equality function
	} else {
		ot = duintptr(lsym, ot, 0) // type we can't do == with
	}
	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOff(lsym, ot, nsym) // str
	// ptrToThis
	if sptr == nil {
		ot = duint32(lsym, ot, 0)
	} else if sptrWeak {
		ot = dsymptrWeakOff(lsym, ot, sptr)
	} else {
		ot = dsymptrOff(lsym, ot, sptr)
	}

	return ot
}

// typeHasNoAlg reports whether t does not have any associated hash/eq
// algorithms because t, or some component of t, is marked Noalg.
func typeHasNoAlg(t *types.Type) bool {
	a, bad := algtype1(t)
	return a == ANOEQ && bad.Noalg()
}

// typesymname returns the name of t's descriptor symbol in the type package.
func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	if typeHasNoAlg(t) {
		name = "noalg." + name
	}
	return name
}

// Fake package for runtime type info (headers)
// Don't access directly, use typeLookup below.
var (
	typepkgmu sync.Mutex // protects typepkg lookups
	typepkg   = types.NewPkg("type", "type")
)

// typeLookup looks name up in typepkg, holding typepkgmu so that
// concurrent callers are safe.
func typeLookup(name string) *types.Sym {
	typepkgmu.Lock()
	s := typepkg.Lookup(name)
	typepkgmu.Unlock()
	return s
}

// typesym returns the type-package symbol naming t's descriptor.
func typesym(t *types.Type) *types.Sym {
	return typeLookup(typesymname(t))
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
968 func tracksym(t *types.Type, f *types.Field) *types.Sym { 969 return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name) 970 } 971 972 func typesymprefix(prefix string, t *types.Type) *types.Sym { 973 p := prefix + "." + t.ShortString() 974 s := typeLookup(p) 975 976 // This function is for looking up type-related generated functions 977 // (e.g. eq and hash). Make sure they are indeed generated. 978 signatmu.Lock() 979 addsignat(t) 980 signatmu.Unlock() 981 982 //print("algsym: %s -> %+S\n", p, s); 983 984 return s 985 } 986 987 func typenamesym(t *types.Type) *types.Sym { 988 if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { 989 Fatalf("typenamesym %v", t) 990 } 991 s := typesym(t) 992 signatmu.Lock() 993 addsignat(t) 994 signatmu.Unlock() 995 return s 996 } 997 998 func typename(t *types.Type) *Node { 999 s := typenamesym(t) 1000 if s.Def == nil { 1001 n := newnamel(src.NoXPos, s) 1002 n.Type = types.Types[TUINT8] 1003 n.SetClass(PEXTERN) 1004 n.SetTypecheck(1) 1005 s.Def = asTypesNode(n) 1006 } 1007 1008 n := nod(OADDR, asNode(s.Def), nil) 1009 n.Type = types.NewPtr(asNode(s.Def).Type) 1010 n.SetTypecheck(1) 1011 return n 1012 } 1013 1014 func itabname(t, itype *types.Type) *Node { 1015 if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { 1016 Fatalf("itabname(%v, %v)", t, itype) 1017 } 1018 s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString()) 1019 if s.Def == nil { 1020 n := newname(s) 1021 n.Type = types.Types[TUINT8] 1022 n.SetClass(PEXTERN) 1023 n.SetTypecheck(1) 1024 s.Def = asTypesNode(n) 1025 itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) 1026 } 1027 1028 n := nod(OADDR, asNode(s.Def), nil) 1029 n.Type = types.NewPtr(asNode(s.Def).Type) 1030 n.SetTypecheck(1) 1031 return n 1032 } 1033 1034 // isreflexive reports whether t has a reflexive equality operator. 1035 // That is, if x==x for all x of type t. 
1036 func isreflexive(t *types.Type) bool { 1037 switch t.Etype { 1038 case TBOOL, 1039 TINT, 1040 TUINT, 1041 TINT8, 1042 TUINT8, 1043 TINT16, 1044 TUINT16, 1045 TINT32, 1046 TUINT32, 1047 TINT64, 1048 TUINT64, 1049 TUINTPTR, 1050 TPTR, 1051 TUNSAFEPTR, 1052 TSTRING, 1053 TCHAN: 1054 return true 1055 1056 case TFLOAT32, 1057 TFLOAT64, 1058 TCOMPLEX64, 1059 TCOMPLEX128, 1060 TINTER: 1061 return false 1062 1063 case TARRAY: 1064 return isreflexive(t.Elem()) 1065 1066 case TSTRUCT: 1067 for _, t1 := range t.Fields().Slice() { 1068 if !isreflexive(t1.Type) { 1069 return false 1070 } 1071 } 1072 return true 1073 1074 default: 1075 Fatalf("bad type for map key: %v", t) 1076 return false 1077 } 1078 } 1079 1080 // needkeyupdate reports whether map updates with t as a key 1081 // need the key to be updated. 1082 func needkeyupdate(t *types.Type) bool { 1083 switch t.Etype { 1084 case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, 1085 TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN: 1086 return false 1087 1088 case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0 1089 TINTER, 1090 TSTRING: // strings might have smaller backing stores 1091 return true 1092 1093 case TARRAY: 1094 return needkeyupdate(t.Elem()) 1095 1096 case TSTRUCT: 1097 for _, t1 := range t.Fields().Slice() { 1098 if needkeyupdate(t1.Type) { 1099 return true 1100 } 1101 } 1102 return false 1103 1104 default: 1105 Fatalf("bad type for map key: %v", t) 1106 return true 1107 } 1108 } 1109 1110 // hashMightPanic reports whether the hash of a map key of type t might panic. 
func hashMightPanic(t *types.Type) bool {
	switch t.Etype {
	case TINTER:
		// Hashing an interface hashes its dynamic value; the runtime
		// panics if the dynamic type is not comparable.
		return true

	case TARRAY:
		return hashMightPanic(t.Elem())

	case TSTRUCT:
		// Hashing a struct hashes each field in turn; it can panic
		// if hashing any field can.
		for _, t1 := range t.Fields().Slice() {
			if hashMightPanic(t1.Type) {
				return true
			}
		}
		return false

	default:
		return false
	}
}

// formalType replaces byte and rune aliases with real types.
// They've been separate internally to make error messages
// better, but we have to merge them in the reflect tables.
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

// dtypesym ensures that a runtime type descriptor for t is emitted into
// the object file and returns its linker symbol. The layout written here
// must stay in sync with the runtime's type structures
// (../../../../runtime/type.go) and with reflect/type.go.
func dtypesym(t *types.Type) *obj.LSym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	lsym := s.Linksym()
	if s.Siggen() {
		// Descriptor already emitted (or in progress); reuse the symbol.
		return lsym
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		// For an unnamed pointer to a named type, ownership of the
		// descriptor follows the named element type.
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		// Unnamed types may be emitted by many packages; let the
		// linker deduplicate the identical copies.
		dupok = obj.DUPOK
	}

	if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
		// named types from other files are defined only by those files
		if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
			return lsym
		}
		// TODO(mdempsky): Investigate whether this can happen.
		if tbase.Etype == TFORW {
			return lsym
		}
	}

	// ot is the running byte offset within lsym as the descriptor
	// fields are written.
	ot := 0
	switch t.Etype {
	default:
		ot = dcommontype(lsym, t)
		ot = dextratype(lsym, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = duintptr(lsym, ot, uint64(t.NumElem()))
		ot = dextratype(lsym, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
		ot = dextratype(lsym, ot, t, 0)

	case TFUNC:
		// Emit descriptors for all receiver, parameter, and result
		// types before writing the funcType itself.
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		// isddd records whether the last parameter is variadic.
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.IsDDD()
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(lsym, t)
		inCount := t.NumRecvs() + t.NumParams()
		outCount := t.NumResults()
		if isddd {
			// The top bit of outCount flags a variadic function.
			outCount |= 1 << 15
		}
		ot = duint16(lsym, ot, uint16(inCount))
		ot = duint16(lsym, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		dataAdd := (inCount + t.NumResults()) * Widthptr
		ot = dextratype(lsym, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(lsym, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(lsym, ot, tpkg)

		// The methods slice points just past the interfaceType header
		// (3 words) and any uncommon section within this same symbol.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))
		dataAdd := imethodSize() * n
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := types.IsExported(a.name.Name)
			var pkg *types.Pkg
			if !exported && a.name.Pkg != tpkg {
				pkg = a.name.Pkg
			}
			nsym := dname(a.name.Name, "", pkg, exported)

			ot = dsymptrOff(lsym, ot, nsym)
			ot = dsymptrOff(lsym, ot, dtypesym(a.type_))
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Elem())
		s3 := dtypesym(bmap(t))
		hasher := genhash(t.Key())

		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = dsymptr(lsym, ot, s3, 0)
		ot = dsymptr(lsym, ot, hasher, 0)
		var flags uint32
		// Note: flags must match maptype accessors in ../../../../runtime/type.go
		// and maptype builder in ../../../../reflect/type.go:MapOf.
		if t.Key().Width > MAXKEYSIZE {
			// Oversized keys are stored indirectly, as pointers.
			ot = duint8(lsym, ot, uint8(Widthptr))
			flags |= 1 // indirect key
		} else {
			ot = duint8(lsym, ot, uint8(t.Key().Width))
		}

		if t.Elem().Width > MAXELEMSIZE {
			// Oversized elements are stored indirectly, as pointers.
			ot = duint8(lsym, ot, uint8(Widthptr))
			flags |= 2 // indirect value
		} else {
			ot = duint8(lsym, ot, uint8(t.Elem().Width))
		}
		ot = duint16(lsym, ot, uint16(bmap(t).Width))
		if isreflexive(t.Key()) {
			flags |= 4 // reflexive key
		}
		if needkeyupdate(t.Key()) {
			flags |= 8 // need key update
		}
		if hashMightPanic(t.Key()) {
			flags |= 16 // hash might panic
		}
		ot = duint32(lsym, ot, flags)
		ot = dextratype(lsym, ot, t, 0)

	case TPTR:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, t)
			ot = dextratype(lsym, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		fields := t.Fields().Slice()
		for _, t1 := range fields {
			dtypesym(t1.Type)
		}

		// All non-exported struct field names within a struct
		// type must originate from a single package. By
		// identifying and recording that package within the
		// struct type descriptor, we can omit that
		// information from the field descriptors.
		var spkg *types.Pkg
		for _, f := range fields {
			if !types.IsExported(f.Sym.Name) {
				spkg = f.Sym.Pkg
				break
			}
		}

		ot = dcommontype(lsym, t)
		ot = dgopkgpath(lsym, ot, spkg)
		// The fields slice points just past the structType header
		// (3 words) and any uncommon section within this same symbol.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(len(fields)))
		ot = duintptr(lsym, ot, uint64(len(fields)))

		dataAdd := len(fields) * structfieldSize()
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, f := range fields {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(lsym, ot, spkg, f)
			ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
			// offsetAnon packs the field offset (shifted left one bit)
			// with the "embedded" flag in the low bit.
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(lsym, ot, offsetAnon)
		}
	}

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	// Do not put Noalg types in typelinks. See issue #22605.
	if typeHasNoAlg(t) {
		keep = false
	}
	lsym.Set(obj.AttrMakeTypelink, keep)

	return lsym
}

// for each itabEntry, gather the methods on
// the concrete type that implement the interface
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.entries = methods
	}
}

// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	// TODO(mdempsky): Short circuit before calling methods(t)?
	// See discussion on CL 105039.
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	// Every interface method must have been matched; anything left
	// over means t does not actually implement it.
	if len(sigs) != 0 {
		Fatalf("incomplete itab")
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	// Find the itab entry whose symbol matches; its entries slice was
	// filled in by peekitabs.
	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	// (skip the inter and _type pointers, then hash uint32 + 4 pad bytes).
	methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}

// addsignat ensures that a runtime type descriptor is emitted for t.
func addsignat(t *types.Type) {
	if _, ok := signatset[t]; !ok {
		signatset[t] = struct{}{}
		signatslice = append(signatslice, t)
	}
}

// addsignats queues runtime type descriptors for every type
// declaration in dcls.
func addsignats(dcls []*Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}
}

// dumpsignats emits runtime type descriptors for all queued types.
func dumpsignats() {
	// Process signatset. Use a loop, as dtypesym adds
	// entries to signatset while it is being processed.
	signats := make([]typeAndStr, len(signatslice))
	for len(signatslice) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for _, t := range signatslice {
			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
			delete(signatset, t)
		}
		signatslice = signatslice[:0]
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			// For named types, also emit the descriptor for *T, which
			// reflect may need even if *T never appears in the program.
			if t.Sym != nil {
				dtypesym(types.NewPtr(t))
			}
		}
	}
}

// dumptabs writes out the accumulated itab and plugin-export (ptab)
// symbols for this compilation unit.
func dumptabs() {
	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   hash   uint32
		//   _      [4]byte
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
		o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
		o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
		o += 4                                // skip unused field
		for _, fn := range genfun(i.t, i.itype) {
			o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
		}
		// Nothing writes static itabs, so they are read only.
		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
		// Also emit an itablink entry so the linker/runtime can find
		// this itab by (concrete type, interface type) pair.
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
		dsymptr(ilink, 0, i.lsym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs")
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//   name nameOff
			//   typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOff(s, ot, nsym)
			ot = dsymptrOff(s, ot, dtypesym(p.t))
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports")
		for _, p := range ptabs {
			ot = dsymptr(s, ot, p.s.Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))
	}
}

// dumpimportstrings emits the import-path strings for all packages
// imported by the current compilation unit.
func dumpimportstrings() {
	// generate import strings for imported packages
	for _, p := range types.ImportedPkgList() {
		dimportpath(p)
	}
}

// dumpbasictypes emits descriptors for the predeclared basic types
// when compiling package runtime itself.
func dumpbasictypes() {
	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in object files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(types.NewPkg("main", ""))
	}
}

// typeAndStr pairs a type with precomputed string forms used for
// deterministic sorting in dumpsignats.
type typeAndStr struct {
	t       *types.Type
	short   string // symbol name form (typesymname)
	regular string // full String() form
}

// typesByString sorts types by their string representations so that
// the emitted descriptors appear in a reproducible order.
type typesByString []typeAndStr

func (a typesByString) Len() int { return len(a) }
func (a typesByString) Less(i, j int) bool {
	if a[i].short != a[j].short {
		return a[i].short < a[j].short
	}
	// When the only difference between the types is whether
	// they refer to byte or uint8, such as **byte vs **uint8,
	// the types' ShortStrings can be identical.
	// To preserve deterministic sort ordering, sort these by String().
	if a[i].regular != a[j].regular {
		return a[i].regular < a[j].regular
	}
	// Identical anonymous interfaces defined in different locations
	// will be equal for the above checks, but different in DWARF output.
	// Sort by source position to ensure deterministic order.
	// See issues 27013 and 30202.
	if a[i].t.Etype == types.TINTER && a[i].t.Methods().Len() > 0 {
		return a[i].t.Methods().Index(0).Pos.Before(a[j].t.Methods().Index(0).Pos)
	}
	return false
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program,
// which can express repetition compactly. In either form, the
// information is used by the runtime to initialize the heap bitmap,
// and for large types (like 128 or more words), they are roughly the
// same speed. GC programs are never much larger and often more
// compact. (If large arrays are involved, they can be arbitrarily
// more compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	// Small types get a plain 1-bit-per-word pointer mask; large types
	// switch to a compact GC program.
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		lsym = dgcptrmask(t)
		return
	}

	useGCProg = true
	lsym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *obj.LSym {
	// One bit per pointer-sized word of the pointer-bearing prefix.
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	// Name the symbol by the mask contents so identical masks from
	// different types/packages collapse to a single symbol.
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Runtimepkg.Lookup(p)
	lsym := sym.Linksym()
	if !sym.Uniq() {
		// First time we've seen this mask: emit its data.
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(lsym, i, x)
		}
		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return lsym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	// Reuse the liveness machinery to mark pointer words in a bitvector,
	// then pack those bits into the byte mask.
	vec := bvalloc(8 * int32(len(ptrmask)))
	onebitwalktype1(t, 0, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	lsym := typesymprefix(".gcprog", t).Linksym()
	var p GCProg
	p.init(lsym)
	p.emit(t, 0)
	// offset is the number of bytes the emitted program describes.
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	// Sanity check: the program must cover at least the pointer-bearing
	// prefix and no more than the full type size.
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return lsym, offset
}

// GCProg writes a runtime GC program (see gcprog.Writer) into a
// data symbol, prefixed by a 4-byte program length.
type GCProg struct {
	lsym   *obj.LSym    // symbol receiving the program bytes
	symoff int          // current write offset within lsym
	w      gcprog.Writer // encoder that calls writeByte for each output byte
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into lsym.
func (p *GCProg) init(lsym *obj.LSym) {
	p.lsym = lsym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one encoded program byte to the symbol.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.lsym, p.symoff, x)
}

// end finishes the program, backpatches the length prefix, and
// publishes the symbol.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.lsym, 0, uint32(p.symoff-4))
	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
	}
}

// emit writes program instructions describing the pointers of t,
// assuming t starts at the given byte offset within the described object.
func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		// A single pointer-sized pointer word.
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		// Only the data pointer (first word) is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		// Only the data pointer (first word) is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit one element, pad to the element boundary, then issue a
		// repeat instruction for the remaining count-1 elements.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map elem too big %d", size)
	}
	// Grow the shared zero region if this request is the largest so far.
	if zerosize < size {
		zerosize = size
	}
	s := mappkg.Lookup("zero")
	if s.Def == nil {
		// Lazily declare the backing symbol as an external uint8.
		x := newname(s)
		x.Type = types.Types[TUINT8]
		x.SetClass(PEXTERN)
		x.SetTypecheck(1)
		s.Def = asTypesNode(x)
	}
	// Return &zero, typed as *uint8.
	z := nod(OADDR, asNode(s.Def), nil)
	z.Type = types.NewPtr(types.Types[TUINT8])
	z.SetTypecheck(1)
	return z
}