// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
)

// itabEntry records an itab (interface dispatch table) to be emitted
// for concrete type t satisfying interface type itype.
type itabEntry struct {
	t, itype *types.Type
	lsym     *obj.LSym // symbol of the itab itself

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in by peekitabs
	entries []*obj.LSym
}

// ptabEntry records a symbol s of type t exported via a plugin table.
type ptabEntry struct {
	s *types.Sym
	t *types.Type
}

// runtime interface and reflection data structures
var (
	signatsetmu sync.Mutex // protects signatset
	signatset   = make(map[*types.Type]struct{})

	itabs []itabEntry
	ptabs []ptabEntry
)

// Sig describes one method signature for the purpose of emitting
// runtime/reflect method tables.
type Sig struct {
	name   string     // method name
	pkg    *types.Pkg // package of an unexported method; nil for exported methods
	isym   *types.Sym // wrapper with the interface-word receiver type
	tsym   *types.Sym // wrapper with receiver type t itself
	type_  *types.Type // method func type with receiver as first argument
	mtype  *types.Type // method func type without receiver
	offset int32
}

// siglt sorts method signatures by name, then package path.
func siglt(a, b *Sig) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.pkg == b.pkg {
		return false
	}
	// nil package (exported method) sorts before any named package.
	if a.pkg == nil {
		return true
	}
	if b.pkg == nil {
		return false
	}
	return a.pkg.Path < b.pkg.Path
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
76 const ( 77 BUCKETSIZE = 8 78 MAXKEYSIZE = 128 79 MAXVALSIZE = 128 80 ) 81 82 func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) 83 func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) 84 85 func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) 86 if t.Sym == nil && len(methods(t)) == 0 { 87 return 0 88 } 89 return 4 + 2 + 2 + 4 + 4 90 } 91 92 func makefield(name string, t *types.Type) *types.Field { 93 f := types.NewField() 94 f.Type = t 95 f.Sym = (*types.Pkg)(nil).Lookup(name) 96 return f 97 } 98 99 // bmap makes the map bucket type given the type of the map. 100 func bmap(t *types.Type) *types.Type { 101 if t.MapType().Bucket != nil { 102 return t.MapType().Bucket 103 } 104 105 bucket := types.New(TSTRUCT) 106 keytype := t.Key() 107 valtype := t.Val() 108 dowidth(keytype) 109 dowidth(valtype) 110 if keytype.Width > MAXKEYSIZE { 111 keytype = types.NewPtr(keytype) 112 } 113 if valtype.Width > MAXVALSIZE { 114 valtype = types.NewPtr(valtype) 115 } 116 117 field := make([]*types.Field, 0, 5) 118 119 // The first field is: uint8 topbits[BUCKETSIZE]. 120 arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE) 121 field = append(field, makefield("topbits", arr)) 122 123 arr = types.NewArray(keytype, BUCKETSIZE) 124 arr.SetNoalg(true) 125 keys := makefield("keys", arr) 126 field = append(field, keys) 127 128 arr = types.NewArray(valtype, BUCKETSIZE) 129 arr.SetNoalg(true) 130 values := makefield("values", arr) 131 field = append(field, values) 132 133 // Make sure the overflow pointer is the last memory in the struct, 134 // because the runtime assumes it can use size-ptrSize as the 135 // offset of the overflow pointer. We double-check that property 136 // below once the offsets and size are computed. 137 // 138 // BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point. 
139 // On 32-bit systems, the max alignment is 32-bit, and the 140 // overflow pointer will add another 32-bit field, and the struct 141 // will end with no padding. 142 // On 64-bit systems, the max alignment is 64-bit, and the 143 // overflow pointer will add another 64-bit field, and the struct 144 // will end with no padding. 145 // On nacl/amd64p32, however, the max alignment is 64-bit, 146 // but the overflow pointer will add only a 32-bit field, 147 // so if the struct needs 64-bit padding (because a key or value does) 148 // then it would end with an extra 32-bit padding field. 149 // Preempt that by emitting the padding here. 150 if int(valtype.Align) > Widthptr || int(keytype.Align) > Widthptr { 151 field = append(field, makefield("pad", types.Types[TUINTPTR])) 152 } 153 154 // If keys and values have no pointers, the map implementation 155 // can keep a list of overflow pointers on the side so that 156 // buckets can be marked as having no pointers. 157 // Arrange for the bucket to have no pointers by changing 158 // the type of the overflow field to uintptr in this case. 159 // See comment on hmap.overflow in ../../../../runtime/hashmap.go. 160 otyp := types.NewPtr(bucket) 161 if !types.Haspointers(valtype) && !types.Haspointers(keytype) { 162 otyp = types.Types[TUINTPTR] 163 } 164 overflow := makefield("overflow", otyp) 165 field = append(field, overflow) 166 167 // link up fields 168 bucket.SetNoalg(true) 169 bucket.SetLocal(t.Local()) 170 bucket.SetFields(field[:]) 171 dowidth(bucket) 172 173 // Check invariants that map code depends on. 
174 if BUCKETSIZE < 8 { 175 Fatalf("bucket size too small for proper alignment") 176 } 177 if keytype.Align > BUCKETSIZE { 178 Fatalf("key align too big for %v", t) 179 } 180 if valtype.Align > BUCKETSIZE { 181 Fatalf("value align too big for %v", t) 182 } 183 if keytype.Width > MAXKEYSIZE { 184 Fatalf("key size to large for %v", t) 185 } 186 if valtype.Width > MAXVALSIZE { 187 Fatalf("value size to large for %v", t) 188 } 189 if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() { 190 Fatalf("key indirect incorrect for %v", t) 191 } 192 if t.Val().Width > MAXVALSIZE && !valtype.IsPtr() { 193 Fatalf("value indirect incorrect for %v", t) 194 } 195 if keytype.Width%int64(keytype.Align) != 0 { 196 Fatalf("key size not a multiple of key align for %v", t) 197 } 198 if valtype.Width%int64(valtype.Align) != 0 { 199 Fatalf("value size not a multiple of value align for %v", t) 200 } 201 if bucket.Align%keytype.Align != 0 { 202 Fatalf("bucket align not multiple of key align %v", t) 203 } 204 if bucket.Align%valtype.Align != 0 { 205 Fatalf("bucket align not multiple of value align %v", t) 206 } 207 if keys.Offset%int64(keytype.Align) != 0 { 208 Fatalf("bad alignment of keys in bmap for %v", t) 209 } 210 if values.Offset%int64(valtype.Align) != 0 { 211 Fatalf("bad alignment of values in bmap for %v", t) 212 } 213 214 // Double-check that overflow field is final memory in struct, 215 // with no padding at end. See comment above. 216 if overflow.Offset != bucket.Width-int64(Widthptr) { 217 Fatalf("bad offset of overflow in bmap for %v", t) 218 } 219 220 t.MapType().Bucket = bucket 221 222 bucket.StructType().Map = t 223 return bucket 224 } 225 226 // hmap builds a type representing a Hmap structure for the given map type. 227 // Make sure this stays in sync with ../../../../runtime/hashmap.go. 
228 func hmap(t *types.Type) *types.Type { 229 if t.MapType().Hmap != nil { 230 return t.MapType().Hmap 231 } 232 233 bmap := bmap(t) 234 235 // build a struct: 236 // type hmap struct { 237 // count int 238 // flags uint8 239 // B uint8 240 // noverflow uint16 241 // hash0 uint32 242 // buckets *bmap 243 // oldbuckets *bmap 244 // nevacuate uintptr 245 // extra unsafe.Pointer // *mapextra 246 // } 247 // must match ../../../../runtime/hashmap.go:hmap. 248 fields := []*types.Field{ 249 makefield("count", types.Types[TINT]), 250 makefield("flags", types.Types[TUINT8]), 251 makefield("B", types.Types[TUINT8]), 252 makefield("noverflow", types.Types[TUINT16]), 253 makefield("hash0", types.Types[TUINT32]), 254 makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for makemap. 255 makefield("oldbuckets", types.NewPtr(bmap)), 256 makefield("nevacuate", types.Types[TUINTPTR]), 257 makefield("extra", types.Types[TUNSAFEPTR]), 258 } 259 260 hmap := types.New(TSTRUCT) 261 hmap.SetNoalg(true) 262 hmap.SetLocal(t.Local()) 263 hmap.SetFields(fields) 264 dowidth(hmap) 265 266 // The size of hmap should be 48 bytes on 64 bit 267 // and 28 bytes on 32 bit platforms. 268 if size := int64(8 + 5*Widthptr); hmap.Width != size { 269 Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size) 270 } 271 272 t.MapType().Hmap = hmap 273 hmap.StructType().Map = t 274 return hmap 275 } 276 277 // hiter builds a type representing an Hiter structure for the given map type. 278 // Make sure this stays in sync with ../../../../runtime/hashmap.go. 
279 func hiter(t *types.Type) *types.Type { 280 if t.MapType().Hiter != nil { 281 return t.MapType().Hiter 282 } 283 284 hmap := hmap(t) 285 bmap := bmap(t) 286 287 // build a struct: 288 // type hiter struct { 289 // key *Key 290 // val *Value 291 // t unsafe.Pointer // *MapType 292 // h *hmap 293 // buckets *bmap 294 // bptr *bmap 295 // overflow [2]unsafe.Pointer // [2]*[]*bmap 296 // startBucket uintptr 297 // offset uint8 298 // wrapped bool 299 // B uint8 300 // i uint8 301 // bucket uintptr 302 // checkBucket uintptr 303 // } 304 // must match ../../../../runtime/hashmap.go:hiter. 305 fields := []*types.Field{ 306 makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP. 307 makefield("val", types.NewPtr(t.Val())), // Used in range.go for TMAP. 308 makefield("t", types.Types[TUNSAFEPTR]), 309 makefield("h", types.NewPtr(hmap)), 310 makefield("buckets", types.NewPtr(bmap)), 311 makefield("bptr", types.NewPtr(bmap)), 312 makefield("overflow", types.NewArray(types.Types[TUNSAFEPTR], 2)), 313 makefield("startBucket", types.Types[TUINTPTR]), 314 makefield("offset", types.Types[TUINT8]), 315 makefield("wrapped", types.Types[TBOOL]), 316 makefield("B", types.Types[TUINT8]), 317 makefield("i", types.Types[TUINT8]), 318 makefield("bucket", types.Types[TUINTPTR]), 319 makefield("checkBucket", types.Types[TUINTPTR]), 320 } 321 322 // build iterator struct holding the above fields 323 hiter := types.New(TSTRUCT) 324 hiter.SetNoalg(true) 325 hiter.SetFields(fields) 326 dowidth(hiter) 327 if hiter.Width != int64(12*Widthptr) { 328 Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) 329 } 330 t.MapType().Hiter = hiter 331 hiter.StructType().Map = t 332 return hiter 333 } 334 335 // f is method type, with receiver. 336 // return function type, receiver as first argument (or not). 
// f is method type, with receiver.
// return function type, receiver as first argument (or not).
func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
	var in []*Node
	if receiver != nil {
		// Receiver becomes the first explicit parameter.
		d := nod(ODCLFIELD, nil, nil)
		d.Type = receiver
		in = append(in, d)
	}

	var d *Node
	for _, t := range f.Params().Fields().Slice() {
		d = nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		d.SetIsddd(t.Isddd()) // preserve variadic-ness of the last parameter
		in = append(in, d)
	}

	var out []*Node
	for _, t := range f.Results().Fields().Slice() {
		d = nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		out = append(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname() != nil {
		// Link to name of original method function.
		t.SetNname(f.Nname())
	}

	return t
}

// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *types.Type) []*Sig {
	// method type
	mt := methtype(t)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = types.NewPtr(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var ms []*Sig
	for _, f := range mt.AllMethods().Slice() {
		if f.Type.Etype != TFUNC || f.Type.Recv() == nil {
			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
		}
		// NOTE(review): this second check appears unreachable — the
		// condition above already fatals when Recv() == nil.
		if f.Type.Recv() == nil {
			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Nointerface() {
			continue
		}

		method := f.Sym
		if method == nil {
			continue
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		this := f.Type.Recv().Type

		if this.IsPtr() && this.Elem() == t {
			continue
		}
		if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) {
			continue
		}

		var sig Sig
		ms = append(ms, &sig)

		sig.name = method.Name
		if !exportname(method.Name) {
			// Unexported methods record their package for name mangling.
			if method.Pkg == nil {
				Fatalf("methods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.isym = methodsym(method, it, true)
		sig.tsym = methodsym(method, t, false)
		sig.type_ = methodfunc(f.Type, t)
		sig.mtype = methodfunc(f.Type, nil)

		// Generate the interface-receiver wrapper once per symbol.
		if !sig.isym.Siggen() {
			sig.isym.SetSiggen(true)
			if !eqtype(this, it) || this.Width < int64(Widthptr) {
				compiling_wrappers = 1
				genwrapper(it, f, sig.isym, 1)
				compiling_wrappers = 0
			}
		}

		// Generate the value-receiver wrapper once per symbol.
		if !sig.tsym.Siggen() {
			sig.tsym.SetSiggen(true)
			if !eqtype(this, t) {
				compiling_wrappers = 1
				genwrapper(t, f, sig.tsym, 0)
				compiling_wrappers = 0
			}
		}
	}

	obj.SortSlice(ms, func(i, j int) bool { return siglt(ms[i], ms[j]) })
	return ms
}

// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *types.Type) []*Sig {
	var methods []*Sig
	for _, f := range t.Fields().Slice() {
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		method := f.Sym
		var sig = Sig{
			name: method.Name,
		}
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatalf("imethods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.mtype = f.Type
		sig.offset = 0
		sig.type_ = methodfunc(f.Type, nil)

		// Interface fields are expected to arrive already sorted;
		// verify rather than re-sort.
		if n := len(methods); n > 0 {
			last := methods[n-1]
			if !(siglt(last, &sig)) {
				Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name)
			}
		}
		methods = append(methods, &sig)

		// Compiler can only refer to wrappers for non-blank methods.
		if method.IsBlank() {
			continue
		}

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym := methodsym(method, t, false)
		if !isym.Siggen() {
			isym.SetSiggen(true)
			genwrapper(t, f, isym, 0)
		}
	}

	return methods
}

// dimportpath emits the import-path name symbol for package p and
// caches it in p.Pathsym. Subsequent calls are no-ops.
func dimportpath(p *types.Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	var str string
	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		str = myimportpath
	} else {
		str = p.Path
	}

	s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
	ot := dnameData(s, 0, str, "", nil, false)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	p.Pathsym = s
}

// dgopkgpath writes a pointer-sized reference to pkg's path symbol
// into s at offset ot and returns the new offset.
func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duintptr(s, ot, 0)
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptr(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptr(s, ot, pkg.Pathsym, 0)
}

// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duint32(s, ot, 0)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptrOff(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptrOff(s, ot, pkg.Pathsym, 0)
}

// isExportedField reports whether a struct field is exported.
// It also returns the package to use for PkgPath for an unexported field.
func isExportedField(ft *types.Field) (bool, *types.Pkg) {
	if ft.Sym != nil && ft.Embedded == 0 {
		// Ordinary (non-embedded) field: exportedness follows its own name.
		return exportname(ft.Sym.Name), ft.Sym.Pkg
	}
	// Embedded field: exportedness follows the type's name.
	if ft.Type.Sym != nil &&
		(ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) {
		return false, ft.Type.Sym.Pkg
	}
	return true, nil
}

// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
	var name string
	if ft.Sym != nil {
		name = ft.Sym.Name
	}
	isExported, fpkg := isExportedField(ft)
	// Omit the package when it matches the struct's own package.
	if isExported || fpkg == spkg {
		fpkg = nil
	}
	nsym := dname(name, ft.Note, fpkg, isExported)
	return dsymptr(lsym, ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
	// Name and tag lengths are stored in 16 bits each.
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	// Big-endian 16-bit length prefix, then the name bytes.
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		// Tag follows the name with its own 16-bit length prefix.
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	if pkg != nil {
		ot = dgopkgpathOff(s, ot, pkg)
	}

	return ot
}

// dnameCount makes package-qualified name symbols unique within this
// compilation (see dname below).
var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			if exported {
				sname += name + "." + tag
			} else {
				sname += name + "-" + tag
			}
		}
	} else {
		// Package-qualified names are not shared; number them.
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := Ctxt.Lookup(sname)
	// Already written (shared symbol): reuse it.
	if len(s.P) > 0 {
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
	m := methods(t)
	// Unnamed, methodless types carry no uncommon section.
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %v", t)
	}

	// Emit the type descriptors for all method signatures first.
	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOff(lsym, ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	// Method count must fit in uint16, offset in uint32 (overflow checks).
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	}
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)
	}

	ot = duint16(lsym, ot, uint16(mcount))
	ot = duint16(lsym, ot, 0)
	ot = duint32(lsym, ot, uint32(dataAdd))
	ot = duint32(lsym, ot, 0)
	return ot
}

// typePkg returns the package of t's name (or its element type's name
// for arrays, slices, pointers, and channels), or nil when t is
// unnamed or predeclared.
func typePkg(t *types.Type) *types.Pkg {
	tsym := t.Sym
	if tsym == nil {
		switch t.Etype {
		case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := exportname(a.name)
		var pkg *types.Pkg
		// Record the package only for unexported methods from a
		// different package than the type itself.
		if !exported && a.pkg != typePkg(t) {
			pkg = a.pkg
		}
		nsym := dname(a.name, "", pkg, exported)

		ot = dsymptrOff(lsym, ot, nsym, 0)
		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype).Linksym())
		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
	}
	return ot
}

// dmethodptrOff writes a 4-byte R_METHODOFF relocation to x in s at
// offset ot and returns the advanced offset.
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
	duint32(s, ot, 0) // placeholder; the relocation fills it in
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = objabi.R_METHODOFF
	return ot + 4
}

// kinds maps compiler type etypes to runtime/reflect kind constants.
var kinds = []int{
	TINT:        objabi.KindInt,
	TUINT:       objabi.KindUint,
	TINT8:       objabi.KindInt8,
	TUINT8:      objabi.KindUint8,
	TINT16:      objabi.KindInt16,
	TUINT16:     objabi.KindUint16,
	TINT32:      objabi.KindInt32,
	TUINT32:     objabi.KindUint32,
	TINT64:      objabi.KindInt64,
	TUINT64:     objabi.KindUint64,
	TUINTPTR:    objabi.KindUintptr,
	TFLOAT32:    objabi.KindFloat32,
	TFLOAT64:    objabi.KindFloat64,
	TBOOL:       objabi.KindBool,
	TSTRING:     objabi.KindString,
	TPTR32:      objabi.KindPtr,
	TPTR64:      objabi.KindPtr,
	TSTRUCT:     objabi.KindStruct,
	TINTER:      objabi.KindInterface,
	TCHAN:       objabi.KindChan,
	TMAP:        objabi.KindMap,
	TARRAY:      objabi.KindArray,
	TSLICE:      objabi.KindSlice,
	TFUNC:       objabi.KindFunc,
	TCOMPLEX64:  objabi.KindComplex64,
	TCOMPLEX128: objabi.KindComplex128,
	TUNSAFEPTR:  objabi.KindUnsafePointer,
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
	if !types.Haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		// Single pointer word.
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		var lastPtrField *types.Field
		for _, t1 := range t.Fields().Slice() {
			if types.Haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
	tflagNamed     = 1 << 2
)

// Lazily-initialized symbols for the runtime's algorithm tables,
// set up in dcommontype/dalgsym.
var (
	algarray       *obj.LSym
	memhashvarlen  *obj.LSym
	memequalvarlen *obj.LSym
)

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int {
	// The rtype header must be the first thing written to the symbol.
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if algarray == nil {
		algarray = sysfunc("algarray")
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *obj.LSym
	if alg == ASPECIAL || alg == AMEM {
		algsym = dalgsym(t)
	}

	// Emit a descriptor for *T as well (weakly, unless *T is named or
	// has methods), so reflect can find it.
	sptrWeak := true
	var sptr *obj.LSym
	if !t.IsPtr() || t.PtrBase != nil {
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
			sptrWeak = false
		}
		sptr = dtypesym(tptr).Linksym()
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	//	type rtype struct {
	//		size          uintptr
	//		ptrdata       uintptr
	//		hash          uint32
	//		tflag         tflag
	//		align         uint8
	//		fieldAlign    uint8
	//		kind          uint8
	//		alg           *typeAlg
	//		gcdata        *byte
	//		str           nameOff
	//		ptrToThis     typeOff
	//	}
	ot = duintptr(lsym, ot, uint64(t.Width))
	ot = duintptr(lsym, ot, uint64(ptrdata))
	ot = duint32(lsym, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}

	exported := false
	p := t.LongString()
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = exportname(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = exportname(t.Elem().Sym.Name)
		}
	}

	ot = duint8(lsym, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(lsym, ot, t.Align) // align
	ot = duint8(lsym, ot, t.Align) // fieldAlign

	// Fold GC-relevant flags into the kind byte.
	i = kinds[t.Etype]
	if !types.Haspointers(t) {
		i |= objabi.KindNoPointers
	}
	if isdirectiface(t) {
		i |= objabi.KindDirectIface
	}
	if useGCProg {
		i |= objabi.KindGCProg
	}
	ot = duint8(lsym, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(lsym, ot, algsym, 0)
	}
	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOff(lsym, ot, nsym, 0) // str
	// ptrToThis
	if sptr == nil {
		ot = duint32(lsym, ot, 0)
	} else if sptrWeak {
		ot = dsymptrWeakOff(lsym, ot, sptr)
	} else {
		ot = dsymptrOff(lsym, ot, sptr, 0)
	}

	return ot
}

// typesymname returns the symbol name for type t's descriptor.
func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	if a, bad := algtype1(t); a == ANOEQ && bad.Noalg() {
		name = "noalg." + name
	}
	return name
}

// Fake package for runtime type info (headers)
// Don't access directly, use typeLookup below.
var (
	typepkgmu sync.Mutex // protects typepkg lookups
	typepkg   = types.NewPkg("type", "type")
)

// typeLookup interns name in the fake "type" package, guarded by
// typepkgmu for concurrent callers.
func typeLookup(name string) *types.Sym {
	typepkgmu.Lock()
	s := typepkg.Lookup(name)
	typepkgmu.Unlock()
	return s
}

// typesym returns the symbol holding type t's descriptor.
func typesym(t *types.Type) *types.Sym {
	return typeLookup(typesymname(t))
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym {
	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
}

// typesymprefix returns the symbol "<prefix>.<t's short name>" in the
// fake "type" package.
func typesymprefix(prefix string, t *types.Type) *types.Sym {
	p := prefix + "." + t.ShortString()
	s := typeLookup(p)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym returns the descriptor symbol for t and records t in
// signatset so its descriptor will be emitted.
func typenamesym(t *types.Type) *types.Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typenamesym %v", t)
	}
	s := typesym(t)
	signatsetmu.Lock()
	addsignat(t)
	signatsetmu.Unlock()
	return s
}

// typename returns a Node addressing type t's descriptor symbol.
func typename(t *types.Type) *Node {
	s := typenamesym(t)
	if s.Def == nil {
		n := newnamel(src.NoXPos, s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// itabname returns a Node addressing the itab symbol for concrete type
// t and interface type itype, registering the itab for emission on
// first use.
func itabname(t, itype *types.Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
		Fatalf("itabname(%v, %v)", t, itype)
	}
	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
	if s.Def == nil {
		n := newname(s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
		itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
1071 func isreflexive(t *types.Type) bool { 1072 switch t.Etype { 1073 case TBOOL, 1074 TINT, 1075 TUINT, 1076 TINT8, 1077 TUINT8, 1078 TINT16, 1079 TUINT16, 1080 TINT32, 1081 TUINT32, 1082 TINT64, 1083 TUINT64, 1084 TUINTPTR, 1085 TPTR32, 1086 TPTR64, 1087 TUNSAFEPTR, 1088 TSTRING, 1089 TCHAN: 1090 return true 1091 1092 case TFLOAT32, 1093 TFLOAT64, 1094 TCOMPLEX64, 1095 TCOMPLEX128, 1096 TINTER: 1097 return false 1098 1099 case TARRAY: 1100 return isreflexive(t.Elem()) 1101 1102 case TSTRUCT: 1103 for _, t1 := range t.Fields().Slice() { 1104 if !isreflexive(t1.Type) { 1105 return false 1106 } 1107 } 1108 return true 1109 1110 default: 1111 Fatalf("bad type for map key: %v", t) 1112 return false 1113 } 1114 } 1115 1116 // needkeyupdate reports whether map updates with t as a key 1117 // need the key to be updated. 1118 func needkeyupdate(t *types.Type) bool { 1119 switch t.Etype { 1120 case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, 1121 TINT64, TUINT64, TUINTPTR, TPTR32, TPTR64, TUNSAFEPTR, TCHAN: 1122 return false 1123 1124 case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0 1125 TINTER, 1126 TSTRING: // strings might have smaller backing stores 1127 return true 1128 1129 case TARRAY: 1130 return needkeyupdate(t.Elem()) 1131 1132 case TSTRUCT: 1133 for _, t1 := range t.Fields().Slice() { 1134 if needkeyupdate(t1.Type) { 1135 return true 1136 } 1137 } 1138 return false 1139 1140 default: 1141 Fatalf("bad type for map key: %v", t) 1142 return true 1143 } 1144 } 1145 1146 // formalType replaces byte and rune aliases with real types. 1147 // They've been separate internally to make error messages 1148 // better, but we have to merge them in the reflect tables. 
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

// dtypesym returns the symbol holding t's runtime type descriptor,
// emitting the descriptor data (and, recursively, the descriptors of
// any referenced types) if it has not been generated yet.
// The per-kind layouts written here must match the corresponding
// structs in ../../../../runtime/type.go.
func dtypesym(t *types.Type) *types.Sym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	if s.Siggen() {
		// Descriptor already emitted (or in progress); reuse symbol.
		return s
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		// For an unnamed pointer to a named type, base decisions
		// (dupok, locality) on the named element type.
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		// Unnamed types may be emitted by many packages; let the
		// linker deduplicate them.
		dupok = obj.DUPOK
	}

	if myimportpath == "runtime" && (tbase == types.Types[tbase.Etype] || tbase == types.Bytetype || tbase == types.Runetype || tbase == types.Errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local() {
		return s
	}
	if isforw[tbase.Etype] {
		// Forward declaration; nothing to emit yet.
		return s
	}

ok:
	// ot tracks the current write offset into the descriptor symbol.
	ot := 0
	lsym := s.Linksym()
	switch t.Etype {
	default:
		ot = dcommontype(lsym, ot, t)
		ot = dextratype(lsym, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dsymptr(lsym, ot, s2.Linksym(), 0)
		ot = duintptr(lsym, ot, uint64(t.NumElem()))
		ot = dextratype(lsym, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dextratype(lsym, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
		ot = dextratype(lsym, ot, t, 0)

	case TFUNC:
		// Ensure descriptors exist for all parameter and result types
		// before writing the funcType header.
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.Isddd()
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(lsym, ot, t)
		inCount := t.NumRecvs() + t.NumParams()
		outCount := t.NumResults()
		if isddd {
			// The top bit of outCount records that the function
			// is variadic.
			outCount |= 1 << 15
		}
		ot = duint16(lsym, ot, uint16(inCount))
		ot = duint16(lsym, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		dataAdd := (inCount + t.NumResults()) * Widthptr
		ot = dextratype(lsym, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(lsym, ot, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(lsym, ot, tpkg)

		// The methods slice header points just past the interfaceType
		// (and its uncommon section) within this same symbol.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))
		dataAdd := imethodSize() * n
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := exportname(a.name)
			var pkg *types.Pkg
			if !exported && a.pkg != tpkg {
				pkg = a.pkg
			}
			nsym := dname(a.name, "", pkg, exported)

			ot = dsymptrOff(lsym, ot, nsym, 0)
			ot = dsymptrOff(lsym, ot, dtypesym(a.type_).Linksym(), 0)
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Val())
		s3 := dtypesym(bmap(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dsymptr(lsym, ot, s2.Linksym(), 0)
		ot = dsymptr(lsym, ot, s3.Linksym(), 0)
		ot = dsymptr(lsym, ot, s4.Linksym(), 0)
		// Large keys/values are stored indirectly (see MAXKEYSIZE,
		// MAXVALSIZE and bmap above).
		if t.Key().Width > MAXKEYSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Key().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		if t.Val().Width > MAXVALSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Val().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		ot = duint16(lsym, ot, uint16(bmap(t).Width))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
		ot = dextratype(lsym, ot, t, 0)

	case TPTR32, TPTR64:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, ot, t)
			ot = dextratype(lsym, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for _, t1 := range t.Fields().Slice() {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(lsym, ot, t)
		pkg := localpkg
		if t.Sym != nil {
			pkg = t.Sym.Pkg
		} else {
			// Unnamed type. Grab the package from the first field, if any.
			for _, f := range t.Fields().Slice() {
				if f.Embedded != 0 {
					continue
				}
				pkg = f.Sym.Pkg
				break
			}
		}
		ot = dgopkgpath(lsym, ot, pkg)
		// Fields slice header points past the structType (and its
		// uncommon section) within this same symbol.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))

		dataAdd := n * structfieldSize()
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, f := range t.Fields().Slice() {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(lsym, ot, pkg, f)
			ot = dsymptr(lsym, ot, dtypesym(f.Type).Linksym(), 0)
			// Field offset and the embedded flag share one word:
			// offset<<1 | embedded.
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(lsym, ot, offsetAnon)
		}
	}

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	lsym.Set(obj.AttrMakeTypelink, keep)

	return s
}

// for each itabEntry, gather the methods on
// the concrete type that implement the interface
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.entries = methods
	}
}

// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	// Find the entry recorded for this itab symbol by peekitabs.
	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	// (inter, _type pointers, then hash uint32 + 4 bytes padding,
	// then the fun array; see dumptabs below).
	methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}

// addsignat records that a runtime type descriptor should be
// emitted for t (see dumpsignats).
func addsignat(t *types.Type) {
	signatset[t] = struct{}{}
}

// addsignats records the type of every OTYPE declaration in dcls.
func addsignats(dcls []*Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}
}

// dumpsignats emits runtime type descriptors for every type in
// signatset, in a deterministic order.
func dumpsignats() {
	// Process signatset. Use a loop, as dtypesym adds
	// entries to signatset while it is being processed.
	signats := make([]typeAndStr, len(signatset))
	for len(signatset) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for t := range signatset {
			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
			delete(signatset, t)
		}
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			if t.Sym != nil {
				// Named types also get a descriptor for *T.
				dtypesym(types.NewPtr(t))
			}
		}
	}
}

// dumptabs writes out the itab symbols collected during compilation,
// plus the plugin ptab tables when compiling package main.
func dumptabs() {
	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   hash   uint32
		//   _      [4]byte
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.lsym, 0, dtypesym(i.itype).Linksym(), 0)
		o = dsymptr(i.lsym, o, dtypesym(i.t).Linksym(), 0)
		o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
		o += 4                                // skip unused field
		for _, fn := range genfun(i.t, i.itype) {
			o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
		}
		// Nothing writes static itabs, so they are read only.
		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
		// Emit an itablink entry pointing at the itab so the linker
		// can build the runtime's itablink table.
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
		dsymptr(ilink, 0, i.lsym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs")
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//   name nameOff
			//   typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOff(s, ot, nsym, 0)
			ot = dsymptrOff(s, ot, dtypesym(p.t).Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports")
		for _, p := range ptabs {
			ot = dsymptr(s, ot, p.s.Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))
	}
}

// dumpimportstrings emits the import path string for every imported package.
func dumpimportstrings() {
	// generate import strings for imported packages
	for _, p := range types.ImportedPkgList() {
		dimportpath(p)
	}
}

// dumpbasictypes emits descriptors for the predeclared types
// when compiling package runtime.
func dumpbasictypes() {
	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in object files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(types.NewPkg("main", ""))
	}
}

// typeAndStr pairs a type with its precomputed sort keys,
// avoiding repeated string construction during sorting.
type typeAndStr struct {
	t       *types.Type
	short   string // typesymname(t)
	regular string // t.String()
}

// typesByString sorts types by their short symbol name, falling back
// to the full String form to break ties.
type typesByString []typeAndStr

func (a typesByString) Len() int { return len(a) }
func (a typesByString) Less(i, j int) bool {
	if a[i].short != a[j].short {
		return a[i].short < a[j].short
	}
	// When the only difference between the types is whether
	// they refer to byte or uint8, such as **byte vs **uint8,
	// the types' ShortStrings can be identical.
	// To preserve deterministic sort ordering, sort these by String().
	return a[i].regular < a[j].regular
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// dalgsym returns the symbol of t's algorithm table (hash and equality
// closures), emitting it and any needed hash/equality functions first.
func dalgsym(t *types.Type) *obj.LSym {
	var lsym *obj.LSym
	var hashfunc *obj.LSym
	var eqfunc *obj.LSym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s := typeLookup(p)
		lsym = s.Linksym()
		if s.AlgGen() {
			return lsym
		}
		s.SetAlgGen(true)

		if memhashvarlen == nil {
			memhashvarlen = sysfunc("memhash_varlen")
			memequalvarlen = sysfunc("memequal_varlen")
		}

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = typeLookup(p).Linksym()

		ot := 0
		ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
		ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = typeLookup(p).Linksym()

		ot = 0
		ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
		ot = duintptr(eqfunc, ot, uint64(t.Width))
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s := typesymprefix(".alg", t)
		lsym = s.Linksym()

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t).Linksym()
		eqfunc = typesymprefix(".eqfunc", t).Linksym()

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash.Linksym(), 0)
		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq.Linksym(), 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(lsym, ot, hashfunc, 0)
	ot = dsymptr(lsym, ot, eqfunc, 0)
	ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
	return lsym
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program,
// which can express repetition compactly. In either form, the
// information is used by the runtime to initialize the heap bitmap,
// and for large types (like 128 or more words), they are roughly the
// same speed. GC programs are never much larger and often more
// compact. (If large arrays are involved, they can be arbitrarily
// more compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		// Small enough: use a plain pointer-mask bitmap.
		lsym = dgcptrmask(t)
		return
	}

	useGCProg = true
	lsym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *obj.LSym {
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	// The symbol is named after the mask contents, so identical masks
	// from different types share one symbol.
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Runtimepkg.Lookup(p)
	lsym := sym.Linksym()
	if !sym.Uniq() {
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(lsym, i, x)
		}
		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return lsym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	vec := bvalloc(8 * int32(len(ptrmask)))
	xoffset := int64(0)
	onebitwalktype1(t, &xoffset, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	lsym := typesymprefix(".gcprog", t).Linksym()
	var p GCProg
	p.init(lsym)
	p.emit(t, 0)
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return lsym, offset
}

// GCProg accumulates a GC program into a data symbol via gcprog.Writer.
type GCProg struct {
	lsym   *obj.LSym     // symbol receiving the program bytes
	symoff int           // current write offset within lsym
	w      gcprog.Writer // encoder that calls writeByte for each output byte
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into lsym, reserving the
// leading 4-byte length word.
func (p *GCProg) init(lsym *obj.LSym) {
	p.lsym = lsym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one program byte to the symbol.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.lsym, p.symoff, x)
}

// end terminates the program, patches the length word at offset 0,
// and emits the finished symbol.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.lsym, 0, uint32(p.symoff-4))
	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
	}
}

// emit writes the pointer bits of type t, located at the given byte
// offset, into the program.
func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		// Pointer-word-sized type with pointers: a single pointer.
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Interface: two pointer words.
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit one element, zero-fill to its full width, then repeat.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map value too big %d", size)
	}
	if zerosize < size {
		zerosize = size
	}
	s := mappkg.Lookup("zero")
	if s.Def == nil {
		x := newname(s)
		x.Type = types.Types[TUINT8]
		x.SetClass(PEXTERN)
		x.SetTypecheck(1)
		s.Def = asTypesNode(x)
	}
	z := nod(OADDR, asNode(s.Def), nil)
	z.Type = types.NewPtr(types.Types[TUINT8])
	z.SetAddable(true)
	z.SetTypecheck(1)
	return z
}