// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
)

// itabEntry records a (concrete type, interface type) pair for which
// an itab must be emitted, along with the symbols involved.
type itabEntry struct {
	t, itype *types.Type
	lsym     *obj.LSym // symbol of the itab itself

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in by peekitabs
	entries []*obj.LSym
}

// ptabEntry pairs an exported symbol with its type for the
// plugin export table.
type ptabEntry struct {
	s *types.Sym
	t *types.Type
}

// runtime interface and reflection data structures
var (
	signatsetmu sync.Mutex // protects signatset
	signatset   = make(map[*types.Type]struct{})

	itabs []itabEntry
	ptabs []ptabEntry
)

// Sig describes one method for the purpose of emitting runtime method
// tables. isym/tsym are the interface-call and direct-call wrapper
// symbols; type_ carries the receiver, mtype does not; offset is used
// only for interface methods.
type Sig struct {
	name   string
	pkg    *types.Pkg
	isym   *types.Sym
	tsym   *types.Sym
	type_  *types.Type
	mtype  *types.Type
	offset int32
}

// siglt sorts method signatures by name, then package path.
// A nil package sorts before any named package.
func siglt(a, b *Sig) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.pkg == b.pkg {
		return false
	}
	if a.pkg == nil {
		return true
	}
	if b.pkg == nil {
		return false
	}
	return a.pkg.Path < b.pkg.Path
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
76 const ( 77 BUCKETSIZE = 8 78 MAXKEYSIZE = 128 79 MAXVALSIZE = 128 80 ) 81 82 func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) 83 func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) 84 85 func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) 86 if t.Sym == nil && len(methods(t)) == 0 { 87 return 0 88 } 89 return 4 + 2 + 2 + 4 + 4 90 } 91 92 func makefield(name string, t *types.Type) *types.Field { 93 f := types.NewField() 94 f.Type = t 95 f.Sym = (*types.Pkg)(nil).Lookup(name) 96 return f 97 } 98 99 // bmap makes the map bucket type given the type of the map. 100 func bmap(t *types.Type) *types.Type { 101 if t.MapType().Bucket != nil { 102 return t.MapType().Bucket 103 } 104 105 bucket := types.New(TSTRUCT) 106 keytype := t.Key() 107 valtype := t.Val() 108 dowidth(keytype) 109 dowidth(valtype) 110 if keytype.Width > MAXKEYSIZE { 111 keytype = types.NewPtr(keytype) 112 } 113 if valtype.Width > MAXVALSIZE { 114 valtype = types.NewPtr(valtype) 115 } 116 117 field := make([]*types.Field, 0, 5) 118 119 // The first field is: uint8 topbits[BUCKETSIZE]. 120 arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE) 121 field = append(field, makefield("topbits", arr)) 122 123 arr = types.NewArray(keytype, BUCKETSIZE) 124 arr.SetNoalg(true) 125 keys := makefield("keys", arr) 126 field = append(field, keys) 127 128 arr = types.NewArray(valtype, BUCKETSIZE) 129 arr.SetNoalg(true) 130 values := makefield("values", arr) 131 field = append(field, values) 132 133 // Make sure the overflow pointer is the last memory in the struct, 134 // because the runtime assumes it can use size-ptrSize as the 135 // offset of the overflow pointer. We double-check that property 136 // below once the offsets and size are computed. 137 // 138 // BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point. 
139 // On 32-bit systems, the max alignment is 32-bit, and the 140 // overflow pointer will add another 32-bit field, and the struct 141 // will end with no padding. 142 // On 64-bit systems, the max alignment is 64-bit, and the 143 // overflow pointer will add another 64-bit field, and the struct 144 // will end with no padding. 145 // On nacl/amd64p32, however, the max alignment is 64-bit, 146 // but the overflow pointer will add only a 32-bit field, 147 // so if the struct needs 64-bit padding (because a key or value does) 148 // then it would end with an extra 32-bit padding field. 149 // Preempt that by emitting the padding here. 150 if int(valtype.Align) > Widthptr || int(keytype.Align) > Widthptr { 151 field = append(field, makefield("pad", types.Types[TUINTPTR])) 152 } 153 154 // If keys and values have no pointers, the map implementation 155 // can keep a list of overflow pointers on the side so that 156 // buckets can be marked as having no pointers. 157 // Arrange for the bucket to have no pointers by changing 158 // the type of the overflow field to uintptr in this case. 159 // See comment on hmap.overflow in ../../../../runtime/hashmap.go. 160 otyp := types.NewPtr(bucket) 161 if !types.Haspointers(valtype) && !types.Haspointers(keytype) { 162 otyp = types.Types[TUINTPTR] 163 } 164 overflow := makefield("overflow", otyp) 165 field = append(field, overflow) 166 167 // link up fields 168 bucket.SetNoalg(true) 169 bucket.SetFields(field[:]) 170 dowidth(bucket) 171 172 // Check invariants that map code depends on. 
173 if !IsComparable(t.Key()) { 174 Fatalf("unsupported map key type for %v", t) 175 } 176 if BUCKETSIZE < 8 { 177 Fatalf("bucket size too small for proper alignment") 178 } 179 if keytype.Align > BUCKETSIZE { 180 Fatalf("key align too big for %v", t) 181 } 182 if valtype.Align > BUCKETSIZE { 183 Fatalf("value align too big for %v", t) 184 } 185 if keytype.Width > MAXKEYSIZE { 186 Fatalf("key size to large for %v", t) 187 } 188 if valtype.Width > MAXVALSIZE { 189 Fatalf("value size to large for %v", t) 190 } 191 if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() { 192 Fatalf("key indirect incorrect for %v", t) 193 } 194 if t.Val().Width > MAXVALSIZE && !valtype.IsPtr() { 195 Fatalf("value indirect incorrect for %v", t) 196 } 197 if keytype.Width%int64(keytype.Align) != 0 { 198 Fatalf("key size not a multiple of key align for %v", t) 199 } 200 if valtype.Width%int64(valtype.Align) != 0 { 201 Fatalf("value size not a multiple of value align for %v", t) 202 } 203 if bucket.Align%keytype.Align != 0 { 204 Fatalf("bucket align not multiple of key align %v", t) 205 } 206 if bucket.Align%valtype.Align != 0 { 207 Fatalf("bucket align not multiple of value align %v", t) 208 } 209 if keys.Offset%int64(keytype.Align) != 0 { 210 Fatalf("bad alignment of keys in bmap for %v", t) 211 } 212 if values.Offset%int64(valtype.Align) != 0 { 213 Fatalf("bad alignment of values in bmap for %v", t) 214 } 215 216 // Double-check that overflow field is final memory in struct, 217 // with no padding at end. See comment above. 218 if overflow.Offset != bucket.Width-int64(Widthptr) { 219 Fatalf("bad offset of overflow in bmap for %v", t) 220 } 221 222 t.MapType().Bucket = bucket 223 224 bucket.StructType().Map = t 225 return bucket 226 } 227 228 // hmap builds a type representing a Hmap structure for the given map type. 229 // Make sure this stays in sync with ../../../../runtime/hashmap.go. 
230 func hmap(t *types.Type) *types.Type { 231 if t.MapType().Hmap != nil { 232 return t.MapType().Hmap 233 } 234 235 bmap := bmap(t) 236 237 // build a struct: 238 // type hmap struct { 239 // count int 240 // flags uint8 241 // B uint8 242 // noverflow uint16 243 // hash0 uint32 244 // buckets *bmap 245 // oldbuckets *bmap 246 // nevacuate uintptr 247 // extra unsafe.Pointer // *mapextra 248 // } 249 // must match ../../../../runtime/hashmap.go:hmap. 250 fields := []*types.Field{ 251 makefield("count", types.Types[TINT]), 252 makefield("flags", types.Types[TUINT8]), 253 makefield("B", types.Types[TUINT8]), 254 makefield("noverflow", types.Types[TUINT16]), 255 makefield("hash0", types.Types[TUINT32]), 256 makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for makemap. 257 makefield("oldbuckets", types.NewPtr(bmap)), 258 makefield("nevacuate", types.Types[TUINTPTR]), 259 makefield("extra", types.Types[TUNSAFEPTR]), 260 } 261 262 hmap := types.New(TSTRUCT) 263 hmap.SetNoalg(true) 264 hmap.SetFields(fields) 265 dowidth(hmap) 266 267 // The size of hmap should be 48 bytes on 64 bit 268 // and 28 bytes on 32 bit platforms. 269 if size := int64(8 + 5*Widthptr); hmap.Width != size { 270 Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size) 271 } 272 273 t.MapType().Hmap = hmap 274 hmap.StructType().Map = t 275 return hmap 276 } 277 278 // hiter builds a type representing an Hiter structure for the given map type. 279 // Make sure this stays in sync with ../../../../runtime/hashmap.go. 
280 func hiter(t *types.Type) *types.Type { 281 if t.MapType().Hiter != nil { 282 return t.MapType().Hiter 283 } 284 285 hmap := hmap(t) 286 bmap := bmap(t) 287 288 // build a struct: 289 // type hiter struct { 290 // key *Key 291 // val *Value 292 // t unsafe.Pointer // *MapType 293 // h *hmap 294 // buckets *bmap 295 // bptr *bmap 296 // overflow unsafe.Pointer // *[]*bmap 297 // oldoverflow unsafe.Pointer // *[]*bmap 298 // startBucket uintptr 299 // offset uint8 300 // wrapped bool 301 // B uint8 302 // i uint8 303 // bucket uintptr 304 // checkBucket uintptr 305 // } 306 // must match ../../../../runtime/hashmap.go:hiter. 307 fields := []*types.Field{ 308 makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP. 309 makefield("val", types.NewPtr(t.Val())), // Used in range.go for TMAP. 310 makefield("t", types.Types[TUNSAFEPTR]), 311 makefield("h", types.NewPtr(hmap)), 312 makefield("buckets", types.NewPtr(bmap)), 313 makefield("bptr", types.NewPtr(bmap)), 314 makefield("overflow", types.Types[TUNSAFEPTR]), 315 makefield("oldoverflow", types.Types[TUNSAFEPTR]), 316 makefield("startBucket", types.Types[TUINTPTR]), 317 makefield("offset", types.Types[TUINT8]), 318 makefield("wrapped", types.Types[TBOOL]), 319 makefield("B", types.Types[TUINT8]), 320 makefield("i", types.Types[TUINT8]), 321 makefield("bucket", types.Types[TUINTPTR]), 322 makefield("checkBucket", types.Types[TUINTPTR]), 323 } 324 325 // build iterator struct holding the above fields 326 hiter := types.New(TSTRUCT) 327 hiter.SetNoalg(true) 328 hiter.SetFields(fields) 329 dowidth(hiter) 330 if hiter.Width != int64(12*Widthptr) { 331 Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) 332 } 333 t.MapType().Hiter = hiter 334 hiter.StructType().Map = t 335 return hiter 336 } 337 338 // f is method type, with receiver. 339 // return function type, receiver as first argument (or not). 
340 func methodfunc(f *types.Type, receiver *types.Type) *types.Type { 341 var in []*Node 342 if receiver != nil { 343 d := nod(ODCLFIELD, nil, nil) 344 d.Type = receiver 345 in = append(in, d) 346 } 347 348 var d *Node 349 for _, t := range f.Params().Fields().Slice() { 350 d = nod(ODCLFIELD, nil, nil) 351 d.Type = t.Type 352 d.SetIsddd(t.Isddd()) 353 in = append(in, d) 354 } 355 356 var out []*Node 357 for _, t := range f.Results().Fields().Slice() { 358 d = nod(ODCLFIELD, nil, nil) 359 d.Type = t.Type 360 out = append(out, d) 361 } 362 363 t := functype(nil, in, out) 364 if f.Nname() != nil { 365 // Link to name of original method function. 366 t.SetNname(f.Nname()) 367 } 368 369 return t 370 } 371 372 // methods returns the methods of the non-interface type t, sorted by name. 373 // Generates stub functions as needed. 374 func methods(t *types.Type) []*Sig { 375 // method type 376 mt := methtype(t) 377 378 if mt == nil { 379 return nil 380 } 381 expandmeth(mt) 382 383 // type stored in interface word 384 it := t 385 386 if !isdirectiface(it) { 387 it = types.NewPtr(t) 388 } 389 390 // make list of methods for t, 391 // generating code if necessary. 392 var ms []*Sig 393 for _, f := range mt.AllMethods().Slice() { 394 if f.Type.Etype != TFUNC || f.Type.Recv() == nil { 395 Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) 396 } 397 if f.Type.Recv() == nil { 398 Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) 399 } 400 if f.Nointerface() { 401 continue 402 } 403 404 method := f.Sym 405 if method == nil { 406 continue 407 } 408 409 // get receiver type for this particular method. 410 // if pointer receiver but non-pointer t and 411 // this is not an embedded pointer inside a struct, 412 // method does not apply. 
413 this := f.Type.Recv().Type 414 415 if this.IsPtr() && this.Elem() == t { 416 continue 417 } 418 if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) { 419 continue 420 } 421 422 var sig Sig 423 ms = append(ms, &sig) 424 425 sig.name = method.Name 426 if !exportname(method.Name) { 427 if method.Pkg == nil { 428 Fatalf("methods: missing package") 429 } 430 sig.pkg = method.Pkg 431 } 432 433 sig.isym = methodsym(method, it, true) 434 sig.tsym = methodsym(method, t, false) 435 sig.type_ = methodfunc(f.Type, t) 436 sig.mtype = methodfunc(f.Type, nil) 437 438 if !sig.isym.Siggen() { 439 sig.isym.SetSiggen(true) 440 if !eqtype(this, it) || this.Width < int64(Widthptr) { 441 compiling_wrappers = true 442 genwrapper(it, f, sig.isym, true) 443 compiling_wrappers = false 444 } 445 } 446 447 if !sig.tsym.Siggen() { 448 sig.tsym.SetSiggen(true) 449 if !eqtype(this, t) { 450 compiling_wrappers = true 451 genwrapper(t, f, sig.tsym, false) 452 compiling_wrappers = false 453 } 454 } 455 } 456 457 obj.SortSlice(ms, func(i, j int) bool { return siglt(ms[i], ms[j]) }) 458 return ms 459 } 460 461 // imethods returns the methods of the interface type t, sorted by name. 462 func imethods(t *types.Type) []*Sig { 463 var methods []*Sig 464 for _, f := range t.Fields().Slice() { 465 if f.Type.Etype != TFUNC || f.Sym == nil { 466 continue 467 } 468 method := f.Sym 469 var sig = Sig{ 470 name: method.Name, 471 } 472 if !exportname(method.Name) { 473 if method.Pkg == nil { 474 Fatalf("imethods: missing package") 475 } 476 sig.pkg = method.Pkg 477 } 478 479 sig.mtype = f.Type 480 sig.offset = 0 481 sig.type_ = methodfunc(f.Type, nil) 482 483 if n := len(methods); n > 0 { 484 last := methods[n-1] 485 if !(siglt(last, &sig)) { 486 Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name) 487 } 488 } 489 methods = append(methods, &sig) 490 491 // Compiler can only refer to wrappers for non-blank methods. 
492 if method.IsBlank() { 493 continue 494 } 495 496 // NOTE(rsc): Perhaps an oversight that 497 // IfaceType.Method is not in the reflect data. 498 // Generate the method body, so that compiled 499 // code can refer to it. 500 isym := methodsym(method, t, false) 501 if !isym.Siggen() { 502 isym.SetSiggen(true) 503 genwrapper(t, f, isym, false) 504 } 505 } 506 507 return methods 508 } 509 510 func dimportpath(p *types.Pkg) { 511 if p.Pathsym != nil { 512 return 513 } 514 515 // If we are compiling the runtime package, there are two runtime packages around 516 // -- localpkg and Runtimepkg. We don't want to produce import path symbols for 517 // both of them, so just produce one for localpkg. 518 if myimportpath == "runtime" && p == Runtimepkg { 519 return 520 } 521 522 var str string 523 if p == localpkg { 524 // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. 525 str = myimportpath 526 } else { 527 str = p.Path 528 } 529 530 s := Ctxt.Lookup("type..importpath." + p.Prefix + ".") 531 ot := dnameData(s, 0, str, "", nil, false) 532 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 533 p.Pathsym = s 534 } 535 536 func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { 537 if pkg == nil { 538 return duintptr(s, ot, 0) 539 } 540 541 if pkg == localpkg && myimportpath == "" { 542 // If we don't know the full import path of the package being compiled 543 // (i.e. -p was not passed on the compiler command line), emit a reference to 544 // type..importpath.""., which the linker will rewrite using the correct import path. 545 // Every package that imports this one directly defines the symbol. 546 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 547 ns := Ctxt.Lookup(`type..importpath."".`) 548 return dsymptr(s, ot, ns, 0) 549 } 550 551 dimportpath(pkg) 552 return dsymptr(s, ot, pkg.Pathsym, 0) 553 } 554 555 // dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol. 
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		// No package: emit a zero offset.
		return duint32(s, ot, 0)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptrOff(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptrOff(s, ot, pkg.Pathsym, 0)
}

// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
	// Unexported fields must belong to the struct's package.
	if !exportname(ft.Sym.Name) && ft.Sym.Pkg != spkg {
		Fatalf("package mismatch for %v", ft.Sym)
	}
	nsym := dname(ft.Sym.Name, ft.Note, nil, exportname(ft.Sym.Name))
	return dsymptr(lsym, ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
// The layout is: one flag byte, a big-endian uint16 name length, the
// name bytes, then (if a tag is present) a big-endian uint16 tag length
// and the tag bytes, and finally (if pkg != nil) a package path offset.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
	// Lengths are encoded in 16 bits.
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0 // flag: exported
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1 // flag: has tag
	}
	if pkg != nil {
		bits |= 1 << 2 // flag: has package path
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	if pkg != nil {
		ot = dgopkgpathOff(s, ot, pkg)
	}

	return ot
}

// dnameCount numbers the per-package (non-shared) name symbols emitted
// by dname.
var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			if exported {
				sname += name + "." + tag
			} else {
				sname += name + "-" + tag
			}
		}
	} else {
		// Package-qualified names cannot be shared; use a counter
		// to make the symbol unique within this compilation.
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := Ctxt.Lookup(sname)
	if len(s.P) > 0 {
		// Already written (shared symbol).
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
	m := methods(t)
	// No uncommontype needed for an unnamed type with no methods.
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	// The uncommontype header must start pointer-aligned.
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %v", t)
	}

	// Emit the type descriptor for each method signature first.
	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOff(lsym, ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	// mcount and dataAdd are stored as uint16/uint32 below; check range.
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	}
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)
	}

	// Layout matches uncommonSize: pkgpath(4) above, then
	// mcount(2), pad(2), moff(4), pad(4).
	ot = duint16(lsym, ot, uint16(mcount))
	ot = duint16(lsym, ot, 0)
	ot = duint32(lsym, ot, uint32(dataAdd))
	ot = duint32(lsym, ot, 0)
	return ot
}

// typePkg returns the package whose path should be recorded for t's
// uncommontype, or nil when no package applies (builtin/unnamed types).
func typePkg(t *types.Type) *types.Pkg {
	tsym := t.Sym
	if tsym == nil {
		// For unnamed composite types, fall back to the element's symbol.
		switch t.Etype {
		case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := exportname(a.name)
		var pkg *types.Pkg
		// Record the package only for unexported methods from a
		// different package than the type's own.
		if !exported && a.pkg != typePkg(t) {
			pkg = a.pkg
		}
		nsym := dname(a.name, "", pkg, exported)

		// Four 4-byte fields per method: name, mtyp, ifn, tfn.
		ot = dsymptrOff(lsym, ot, nsym, 0)
		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
	}
	return ot
}

// dmethodptrOff writes a 4-byte R_METHODOFF relocation to x in s at
// offset ot and returns the next offset.
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
	duint32(s, ot, 0)
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = objabi.R_METHODOFF
	return ot + 4
}

// kinds maps compiler type kinds (TXXX) to runtime reflect kinds.
var kinds = []int{
	TINT:        objabi.KindInt,
	TUINT:       objabi.KindUint,
	TINT8:       objabi.KindInt8,
	TUINT8:      objabi.KindUint8,
	TINT16:      objabi.KindInt16,
	TUINT16:     objabi.KindUint16,
	TINT32:      objabi.KindInt32,
	TUINT32:     objabi.KindUint32,
	TINT64:      objabi.KindInt64,
	TUINT64:     objabi.KindUint64,
	TUINTPTR:    objabi.KindUintptr,
	TFLOAT32:    objabi.KindFloat32,
	TFLOAT64:    objabi.KindFloat64,
	TBOOL:       objabi.KindBool,
	TSTRING:     objabi.KindString,
	TPTR32:      objabi.KindPtr,
	TPTR64:      objabi.KindPtr,
	TSTRUCT:     objabi.KindStruct,
	TINTER:      objabi.KindInterface,
	TCHAN:       objabi.KindChan,
	TMAP:        objabi.KindMap,
	TARRAY:      objabi.KindArray,
	TSLICE:      objabi.KindSlice,
	TFUNC:       objabi.KindFunc,
	TCOMPLEX64:  objabi.KindComplex64,
	TCOMPLEX128: objabi.KindComplex128,
	TUNSAFEPTR:  objabi.KindUnsafePointer,
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
	if !types.Haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		// Single pointer word.
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		// Only the leading array pointer holds a pointer.
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		var lastPtrField *types.Field
		for _, t1 := range t.Fields().Slice() {
			if types.Haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
	tflagNamed     = 1 << 2
)

// Cached symbols for the runtime's algorithm table and the
// variable-length mem hash/equal helpers; looked up lazily.
var (
	algarray       *obj.LSym
	memhashvarlen  *obj.LSym
	memequalvarlen *obj.LSym
)

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int {
	// The rtype header must start at the beginning of the symbol.
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if algarray == nil {
		algarray = sysfunc("algarray")
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *obj.LSym
	if alg == ASPECIAL || alg == AMEM {
		// Needs a type-specific alg table entry.
		algsym = dalgsym(t)
	}

	sptrWeak := true
	var sptr *obj.LSym
	if !t.IsPtr() || t.PtrBase != nil {
		// Emit the *T descriptor so ptrToThis can reference it.
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
			sptrWeak = false
		}
		sptr = dtypesym(tptr)
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	// type rtype struct {
	//	size          uintptr
	//	ptrdata       uintptr
	//	hash          uint32
	//	tflag         tflag
	//	align         uint8
	//	fieldAlign    uint8
	//	kind          uint8
	//	alg           *typeAlg
	//	gcdata        *byte
	//	str           nameOff
	//	ptrToThis     typeOff
	// }
	ot = duintptr(lsym, ot, uint64(t.Width))
	ot = duintptr(lsym, ot, uint64(ptrdata))
	ot = duint32(lsym, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}

	exported := false
	p := t.LongString()
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = exportname(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = exportname(t.Elem().Sym.Name)
		}
	}

	ot = duint8(lsym, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(lsym, ot, t.Align) // align
	ot = duint8(lsym, ot, t.Align) // fieldAlign

	// Combine the reflect kind with the per-type flag bits.
	i = kinds[t.Etype]
	if !types.Haspointers(t) {
		i |= objabi.KindNoPointers
	}
	if isdirectiface(t) {
		i |= objabi.KindDirectIface
	}
	if useGCProg {
		i |= objabi.KindGCProg
	}
	ot = duint8(lsym, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(lsym, ot, algsym, 0)
	}
	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOff(lsym, ot, nsym, 0) // str
	// ptrToThis
	if sptr == nil {
		ot = duint32(lsym, ot, 0)
	} else if sptrWeak {
		ot = dsymptrWeakOff(lsym, ot, sptr)
	} else {
		ot = dsymptrOff(lsym, ot, sptr, 0)
	}

	return ot
}

// typesymname returns the name used for t's type descriptor symbol.
func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	if a, bad := algtype1(t); a == ANOEQ && bad.Noalg() {
		name = "noalg." + name
	}
	return name
}

// Fake package for runtime type info (headers)
// Don't access directly, use typeLookup below.
var (
	typepkgmu sync.Mutex // protects typepkg lookups
	typepkg   = types.NewPkg("type", "type")
)

// typeLookup looks up name in the fake "type" package, guarding the
// shared package with a mutex.
func typeLookup(name string) *types.Sym {
	typepkgmu.Lock()
	s := typepkg.Lookup(name)
	typepkgmu.Unlock()
	return s
}

// typesym returns the symbol for t's type descriptor.
func typesym(t *types.Type) *types.Sym {
	return typeLookup(typesymname(t))
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym {
	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
}

// typesymprefix returns the symbol named prefix "." + t's short name
// in the fake "type" package.
func typesymprefix(prefix string, t *types.Type) *types.Sym {
	p := prefix + "." + t.ShortString()
	s := typeLookup(p)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym returns t's descriptor symbol and queues t for
// signature generation.
func typenamesym(t *types.Type) *types.Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typenamesym %v", t)
	}
	s := typesym(t)
	signatsetmu.Lock()
	addsignat(t)
	signatsetmu.Unlock()
	return s
}

// typename returns a node holding the address of t's type descriptor.
func typename(t *types.Type) *Node {
	s := typenamesym(t)
	if s.Def == nil {
		n := newnamel(src.NoXPos, s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// itabname returns a node holding the address of the itab for the
// (concrete type t, interface type itype) pair, queueing the itab for
// emission on first use.
func itabname(t, itype *types.Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
		Fatalf("itabname(%v, %v)", t, itype)
	}
	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
	if s.Def == nil {
		n := newname(s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
		itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
1056 func isreflexive(t *types.Type) bool { 1057 switch t.Etype { 1058 case TBOOL, 1059 TINT, 1060 TUINT, 1061 TINT8, 1062 TUINT8, 1063 TINT16, 1064 TUINT16, 1065 TINT32, 1066 TUINT32, 1067 TINT64, 1068 TUINT64, 1069 TUINTPTR, 1070 TPTR32, 1071 TPTR64, 1072 TUNSAFEPTR, 1073 TSTRING, 1074 TCHAN: 1075 return true 1076 1077 case TFLOAT32, 1078 TFLOAT64, 1079 TCOMPLEX64, 1080 TCOMPLEX128, 1081 TINTER: 1082 return false 1083 1084 case TARRAY: 1085 return isreflexive(t.Elem()) 1086 1087 case TSTRUCT: 1088 for _, t1 := range t.Fields().Slice() { 1089 if !isreflexive(t1.Type) { 1090 return false 1091 } 1092 } 1093 return true 1094 1095 default: 1096 Fatalf("bad type for map key: %v", t) 1097 return false 1098 } 1099 } 1100 1101 // needkeyupdate reports whether map updates with t as a key 1102 // need the key to be updated. 1103 func needkeyupdate(t *types.Type) bool { 1104 switch t.Etype { 1105 case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, 1106 TINT64, TUINT64, TUINTPTR, TPTR32, TPTR64, TUNSAFEPTR, TCHAN: 1107 return false 1108 1109 case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0 1110 TINTER, 1111 TSTRING: // strings might have smaller backing stores 1112 return true 1113 1114 case TARRAY: 1115 return needkeyupdate(t.Elem()) 1116 1117 case TSTRUCT: 1118 for _, t1 := range t.Fields().Slice() { 1119 if needkeyupdate(t1.Type) { 1120 return true 1121 } 1122 } 1123 return false 1124 1125 default: 1126 Fatalf("bad type for map key: %v", t) 1127 return true 1128 } 1129 } 1130 1131 // formalType replaces byte and rune aliases with real types. 1132 // They've been separate internally to make error messages 1133 // better, but we have to merge them in the reflect tables. 
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

// dtypesym emits the runtime type descriptor for t as read-only data and
// returns its symbol. Each type is emitted at most once (guarded by the
// symbol's Siggen flag); subsequent calls return the existing symbol.
// The per-kind layouts written here must match the corresponding structs
// in runtime/type.go (see the file-path comments on each case).
func dtypesym(t *types.Type) *obj.LSym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	lsym := s.Linksym()
	if s.Siggen() {
		// Already emitted (or in progress); reuse the symbol.
		return lsym
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	// For an unnamed pointer to a named type, ownership/dup rules follow
	// the named element type.
	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		// Unnamed types may be emitted by many packages; mark DUPOK so the
		// linker keeps a single copy.
		dupok = obj.DUPOK
	}

	if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
		// named types from other files are defined only by those files
		if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
			return lsym
		}
		// TODO(mdempsky): Investigate whether this can happen.
		if isforw[tbase.Etype] {
			return lsym
		}
	}

	// ot is the running byte offset within the descriptor symbol.
	ot := 0
	switch t.Etype {
	default:
		ot = dcommontype(lsym, ot, t)
		ot = dextratype(lsym, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = duintptr(lsym, ot, uint64(t.NumElem()))
		ot = dextratype(lsym, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
		ot = dextratype(lsym, ot, t, 0)

	case TFUNC:
		// Emit descriptors for all parameter and result types first.
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.Isddd()
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(lsym, ot, t)
		inCount := t.NumRecvs() + t.NumParams()
		outCount := t.NumResults()
		if isddd {
			// High bit of outCount flags a variadic function
			// (matches reflect's funcType encoding).
			outCount |= 1 << 15
		}
		ot = duint16(lsym, ot, uint16(inCount))
		ot = duint16(lsym, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		dataAdd := (inCount + t.NumResults()) * Widthptr
		ot = dextratype(lsym, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(lsym, ot, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(lsym, ot, tpkg)

		// The methods slice header points just past the fixed part of the
		// descriptor plus the uncommon section.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))
		dataAdd := imethodSize() * n
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := exportname(a.name)
			var pkg *types.Pkg
			if !exported && a.pkg != tpkg {
				pkg = a.pkg
			}
			nsym := dname(a.name, "", pkg, exported)

			ot = dsymptrOff(lsym, ot, nsym, 0)
			ot = dsymptrOff(lsym, ot, dtypesym(a.type_), 0)
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Val())
		s3 := dtypesym(bmap(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = dsymptr(lsym, ot, s3, 0)
		ot = dsymptr(lsym, ot, s4, 0)
		// Oversized keys/values are stored indirectly (as pointers) in the
		// bucket; record the stored size and an indirection flag.
		if t.Key().Width > MAXKEYSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Key().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		if t.Val().Width > MAXVALSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Val().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		ot = duint16(lsym, ot, uint16(bmap(t).Width))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
		ot = dextratype(lsym, ot, t, 0)

	case TPTR32, TPTR64:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, ot, t)
			ot = dextratype(lsym, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for _, t1 := range t.Fields().Slice() {
			dtypesym(t1.Type)
			n++
		}

		// All non-exported struct field names within a struct
		// type must originate from a single package. By
		// identifying and recording that package within the
		// struct type descriptor, we can omit that
		// information from the field descriptors.
		var spkg *types.Pkg
		for _, f := range t.Fields().Slice() {
			if !exportname(f.Sym.Name) {
				spkg = f.Sym.Pkg
				break
			}
		}

		ot = dcommontype(lsym, ot, t)
		ot = dgopkgpath(lsym, ot, spkg)
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))

		dataAdd := n * structfieldSize()
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, f := range t.Fields().Slice() {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(lsym, ot, spkg, f)
			ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
			// Field offset is stored shifted left by one; the low bit
			// flags an embedded (anonymous) field.
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(lsym, ot, offsetAnon)
		}
	}

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	lsym.Set(obj.AttrMakeTypelink, keep)

	return lsym
}

// for each itabEntry, gather the methods on
// the concrete type that implement the interface
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.entries = methods
	}
}

// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
// Given the symbol of an itab and a byte offset into its fun array,
// itabsym returns the symbol of the concrete method stored at that slot,
// or nil if the itab is unknown or the offset is out of range.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	// Linear scan; itabs is the per-compilation list filled by peekitabs.
	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}

// addsignat records t in signatset so that dumpsignats will emit
// its runtime type descriptor.
func addsignat(t *types.Type) {
	signatset[t] = struct{}{}
}

// addsignats queues the types of all OTYPE declarations in dcls
// for descriptor emission.
func addsignats(dcls []*Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}
}

// dumpsignats emits type descriptors for every type queued in signatset,
// plus a pointer-type descriptor for each named type.
func dumpsignats() {
	// Process signatset. Use a loop, as dtypesym adds
	// entries to signatset while it is being processed.
	signats := make([]typeAndStr, len(signatset))
	for len(signatset) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for t := range signatset {
			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
			delete(signatset, t)
		}
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			if t.Sym != nil {
				dtypesym(types.NewPtr(t))
			}
		}
	}
}

// dumptabs emits the static itab symbols collected in itabs (with their
// itablink entries) and, when compiling package main, the plugin ptab
// tables.
func dumptabs() {
	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   hash   uint32
		//   _      [4]byte
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
		o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
		o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
		o += 4                                // skip unused field
		for _, fn := range genfun(i.t, i.itype) {
			o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
		}
		// Nothing writes static itabs, so they are read only.
		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
		// An itablink entry lets the runtime find this itab at startup.
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
		dsymptr(ilink, 0, i.lsym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs")
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//   name nameOff
			//   typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOff(s, ot, nsym, 0)
			ot = dsymptrOff(s, ot, dtypesym(p.t), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports")
		for _, p := range ptabs {
			ot = dsymptr(s, ot, p.s.Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))
	}
}

// dumpimportstrings emits an import-path symbol for every imported package.
func dumpimportstrings() {
	// generate import strings for imported packages
	for _, p := range types.ImportedPkgList() {
		dimportpath(p)
	}
}

// dumpbasictypes emits descriptors for the predeclared basic types (and a
// few special types and import paths) when compiling package runtime.
func dumpbasictypes() {
	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in object files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(types.NewPkg("main", ""))
	}
}

// typeAndStr pairs a type with its precomputed name strings,
// used to sort types deterministically in dumpsignats.
type typeAndStr struct {
	t       *types.Type
	short   string // symbol name (typesymname)
	regular string // full String() form, used as a tie-breaker
}

// typesByString sorts by short name, falling back to the regular name.
type typesByString []typeAndStr

func (a typesByString) Len() int { return len(a) }
func (a typesByString) Less(i, j int) bool {
	if a[i].short != a[j].short {
		return a[i].short < a[j].short
	}
	// When the only difference between the types is whether
	// they refer to byte or uint8, such as **byte vs **uint8,
	// the types' ShortStrings can be identical.
	// To preserve deterministic sort ordering, sort these by String().
	return a[i].regular < a[j].regular
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// dalgsym returns the symbol of the algorithm table (a pair of hash and
// equality function closures) for type t, emitting it on first use.
func dalgsym(t *types.Type) *obj.LSym {
	var lsym *obj.LSym
	var hashfunc *obj.LSym
	var eqfunc *obj.LSym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s := typeLookup(p)
		lsym = s.Linksym()
		if s.AlgGen() {
			return lsym
		}
		s.SetAlgGen(true)

		if memhashvarlen == nil {
			memhashvarlen = sysfunc("memhash_varlen")
			memequalvarlen = sysfunc("memequal_varlen")
		}

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = typeLookup(p).Linksym()

		ot := 0
		ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
		ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = typeLookup(p).Linksym()

		ot = 0
		ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
		ot = duintptr(eqfunc, ot, uint64(t.Width))
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s := typesymprefix(".alg", t)
		lsym = s.Linksym()

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t).Linksym()
		eqfunc = typesymprefix(".eqfunc", t).Linksym()

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash.Linksym(), 0)
		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq.Linksym(), 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(lsym, ot, hashfunc, 0)
	ot = dsymptr(lsym, ot, eqfunc, 0)
	ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
	return lsym
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program,
// which can express repetition compactly. In either form, the
// information is used by the runtime to initialize the heap bitmap,
// and for large types (like 128 or more words), they are roughly the
// same speed. GC programs are never much larger and often more
// compact. (If large arrays are involved, they can be arbitrarily
// more compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		// Small enough for a bitmap: one bit per pointer-sized word.
		lsym = dgcptrmask(t)
		return
	}

	useGCProg = true
	lsym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *obj.LSym {
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	// Masks are content-addressed (named by their hex bytes) so identical
	// masks are shared across types.
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Runtimepkg.Lookup(p)
	lsym := sym.Linksym()
	if !sym.Uniq() {
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(lsym, i, x)
		}
		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return lsym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	vec := bvalloc(8 * int32(len(ptrmask)))
	onebitwalktype1(t, 0, vec)

	// Copy the bit vector into the byte mask, one bit per word.
	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	lsym := typesymprefix(".gcprog", t).Linksym()
	var p GCProg
	p.init(lsym)
	p.emit(t, 0)
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	// Sanity check: the program must cover at least the pointer-containing
	// prefix and no more than the whole type.
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return lsym, offset
}

// GCProg accumulates a GC program for one type into a data symbol.
type GCProg struct {
	lsym   *obj.LSym    // symbol receiving the program bytes
	symoff int          // current write offset within lsym
	w      gcprog.Writer
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into lsym, reserving the
// leading length word.
func (p *GCProg) init(lsym *obj.LSym) {
	p.lsym = lsym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one program byte to the symbol; used as the
// gcprog.Writer sink.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.lsym, p.symoff, x)
}

// end finishes the program, backpatches the length word, and emits the symbol.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.lsym, 0, uint32(p.symoff-4))
	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
	}
}

// emit records the pointer words of t, located at the given byte offset,
// into the program being built.
func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		// Pointer-sized type with pointers: exactly one pointer word.
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		// Only the data pointer of the string header is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Both interface words are pointers.
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		// Only the data pointer of the slice header is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit one element, then a repeat instruction for the rest.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map value too big %d", size)
	}
	// Track the largest size requested; the zero symbol is sized to match.
	if zerosize < size {
		zerosize = size
	}
	s := mappkg.Lookup("zero")
	if s.Def == nil {
		// First use: declare the backing symbol lazily.
		x := newname(s)
		x.Type = types.Types[TUINT8]
		x.SetClass(PEXTERN)
		x.SetTypecheck(1)
		s.Def = asTypesNode(x)
	}
	z := nod(OADDR, asNode(s.Def), nil)
	z.Type = types.NewPtr(types.Types[TUINT8])
	z.SetAddable(true)
	z.SetTypecheck(1)
	return z
}