// github.com/hikaru7719/go@v0.0.0-20181025140707-c8b2ac68906a/src/cmd/compile/internal/gc/reflect.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
)

type itabEntry struct {
	t, itype *types.Type
	lsym     *obj.LSym // symbol of the itab itself

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in by peekitabs
	entries []*obj.LSym
}

type ptabEntry struct {
	s *types.Sym
	t *types.Type
}

// runtime interface and reflection data structures
var (
	signatmu    sync.Mutex // protects signatset and signatslice
	signatset   = make(map[*types.Type]struct{})
	signatslice []*types.Type

	itabs []itabEntry
	ptabs []ptabEntry
)

type Sig struct {
	name  *types.Sym
	isym  *types.Sym
	tsym  *types.Sym
	type_ *types.Type
	mtype *types.Type
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with runtime/map.go.
const (
	BUCKETSIZE = 8
	MAXKEYSIZE = 128
	MAXVALSIZE = 128
)

func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
func imethodSize() int     { return 4 + 4 }       // Sizeof(runtime.imethod{})

func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
	if t.Sym == nil && len(methods(t)) == 0 {
		return 0
	}
	return 4 + 2 + 2 + 4 + 4
}

func makefield(name string, t *types.Type) *types.Field {
	f := types.NewField()
	f.Type = t
	f.Sym = (*types.Pkg)(nil).Lookup(name)
	return f
}

// bmap makes the map bucket type given the type of the map.
func bmap(t *types.Type) *types.Type {
	if t.MapType().Bucket != nil {
		return t.MapType().Bucket
	}

	bucket := types.New(TSTRUCT)
	keytype := t.Key()
	valtype := t.Elem()
	dowidth(keytype)
	dowidth(valtype)
	if keytype.Width > MAXKEYSIZE {
		keytype = types.NewPtr(keytype)
	}
	if valtype.Width > MAXVALSIZE {
		valtype = types.NewPtr(valtype)
	}

	field := make([]*types.Field, 0, 5)

	// The first field is: uint8 topbits[BUCKETSIZE].
	arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
	field = append(field, makefield("topbits", arr))

	arr = types.NewArray(keytype, BUCKETSIZE)
	arr.SetNoalg(true)
	keys := makefield("keys", arr)
	field = append(field, keys)

	arr = types.NewArray(valtype, BUCKETSIZE)
	arr.SetNoalg(true)
	values := makefield("values", arr)
	field = append(field, values)

	// Make sure the overflow pointer is the last memory in the struct,
	// because the runtime assumes it can use size-ptrSize as the
	// offset of the overflow pointer. We double-check that property
	// below once the offsets and size are computed.
	//
	// BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
	// On 32-bit systems, the max alignment is 32-bit, and the
	// overflow pointer will add another 32-bit field, and the struct
	// will end with no padding.
	// On 64-bit systems, the max alignment is 64-bit, and the
	// overflow pointer will add another 64-bit field, and the struct
	// will end with no padding.
	// On nacl/amd64p32, however, the max alignment is 64-bit,
	// but the overflow pointer will add only a 32-bit field,
	// so if the struct needs 64-bit padding (because a key or value does)
	// then it would end with an extra 32-bit padding field.
	// Preempt that by emitting the padding here.
	if int(valtype.Align) > Widthptr || int(keytype.Align) > Widthptr {
		field = append(field, makefield("pad", types.Types[TUINTPTR]))
	}

	// If keys and values have no pointers, the map implementation
	// can keep a list of overflow pointers on the side so that
	// buckets can be marked as having no pointers.
	// Arrange for the bucket to have no pointers by changing
	// the type of the overflow field to uintptr in this case.
	// See comment on hmap.overflow in runtime/map.go.
	otyp := types.NewPtr(bucket)
	if !types.Haspointers(valtype) && !types.Haspointers(keytype) {
		otyp = types.Types[TUINTPTR]
	}
	overflow := makefield("overflow", otyp)
	field = append(field, overflow)

	// link up fields
	bucket.SetNoalg(true)
	bucket.SetFields(field[:])
	dowidth(bucket)

	// Check invariants that map code depends on.
	if !IsComparable(t.Key()) {
		Fatalf("unsupported map key type for %v", t)
	}
	if BUCKETSIZE < 8 {
		Fatalf("bucket size too small for proper alignment")
	}
	if keytype.Align > BUCKETSIZE {
		Fatalf("key align too big for %v", t)
	}
	if valtype.Align > BUCKETSIZE {
		Fatalf("value align too big for %v", t)
	}
	if keytype.Width > MAXKEYSIZE {
		Fatalf("key size too large for %v", t)
	}
	if valtype.Width > MAXVALSIZE {
		Fatalf("value size too large for %v", t)
	}
	if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
		Fatalf("key indirect incorrect for %v", t)
	}
	if t.Elem().Width > MAXVALSIZE && !valtype.IsPtr() {
		Fatalf("value indirect incorrect for %v", t)
	}
	if keytype.Width%int64(keytype.Align) != 0 {
		Fatalf("key size not a multiple of key align for %v", t)
	}
	if valtype.Width%int64(valtype.Align) != 0 {
		Fatalf("value size not a multiple of value align for %v", t)
	}
	if bucket.Align%keytype.Align != 0 {
		Fatalf("bucket align not multiple of key align %v", t)
	}
	if bucket.Align%valtype.Align != 0 {
		Fatalf("bucket align not multiple of value align %v", t)
	}
	if keys.Offset%int64(keytype.Align) != 0 {
		Fatalf("bad alignment of keys in bmap for %v", t)
	}
	if values.Offset%int64(valtype.Align) != 0 {
		Fatalf("bad alignment of values in bmap for %v", t)
	}

	// Double-check that overflow field is final memory in struct,
	// with no padding at end. See comment above.
	if overflow.Offset != bucket.Width-int64(Widthptr) {
		Fatalf("bad offset of overflow in bmap for %v", t)
	}

	t.MapType().Bucket = bucket

	bucket.StructType().Map = t
	return bucket
}
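
// As an illustration (not part of the generated code): for a
// map[int64]string on a 64-bit system, bmap produces a struct
// equivalent to
//
//	type bucket struct {
//		topbits  [8]uint8
//		keys     [8]int64
//		values   [8]string
//		overflow *bucket
//	}
//
// Both int64 and string fit under MAXKEYSIZE/MAXVALSIZE, so neither is
// converted to a pointer, and because string contains a pointer the
// overflow field keeps its *bucket type rather than becoming uintptr.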

// hmap builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
func hmap(t *types.Type) *types.Type {
	if t.MapType().Hmap != nil {
		return t.MapType().Hmap
	}

	bmap := bmap(t)

	// build a struct:
	// type hmap struct {
	//	count      int
	//	flags      uint8
	//	B          uint8
	//	noverflow  uint16
	//	hash0      uint32
	//	buckets    *bmap
	//	oldbuckets *bmap
	//	nevacuate  uintptr
	//	extra      unsafe.Pointer // *mapextra
	// }
	// must match runtime/map.go:hmap.
	fields := []*types.Field{
		makefield("count", types.Types[TINT]),
		makefield("flags", types.Types[TUINT8]),
		makefield("B", types.Types[TUINT8]),
		makefield("noverflow", types.Types[TUINT16]),
		makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP.
		makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
		makefield("oldbuckets", types.NewPtr(bmap)),
		makefield("nevacuate", types.Types[TUINTPTR]),
		makefield("extra", types.Types[TUNSAFEPTR]),
	}

	hmap := types.New(TSTRUCT)
	hmap.SetNoalg(true)
	hmap.SetFields(fields)
	dowidth(hmap)

	// The size of hmap should be 48 bytes on 64 bit
	// and 28 bytes on 32 bit platforms.
	if size := int64(8 + 5*Widthptr); hmap.Width != size {
		Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)
	}

	t.MapType().Hmap = hmap
	hmap.StructType().Map = t
	return hmap
}

// hiter builds a type representing an Hiter structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
func hiter(t *types.Type) *types.Type {
	if t.MapType().Hiter != nil {
		return t.MapType().Hiter
	}

	hmap := hmap(t)
	bmap := bmap(t)

	// build a struct:
	// type hiter struct {
	//	key         *Key
	//	val         *Value
	//	t           unsafe.Pointer // *MapType
	//	h           *hmap
	//	buckets     *bmap
	//	bptr        *bmap
	//	overflow    unsafe.Pointer // *[]*bmap
	//	oldoverflow unsafe.Pointer // *[]*bmap
	//	startBucket uintptr
	//	offset      uint8
	//	wrapped     bool
	//	B           uint8
	//	i           uint8
	//	bucket      uintptr
	//	checkBucket uintptr
	// }
	// must match runtime/map.go:hiter.
	fields := []*types.Field{
		makefield("key", types.NewPtr(t.Key())),  // Used in range.go for TMAP.
		makefield("val", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
		makefield("t", types.Types[TUNSAFEPTR]),
		makefield("h", types.NewPtr(hmap)),
		makefield("buckets", types.NewPtr(bmap)),
		makefield("bptr", types.NewPtr(bmap)),
		makefield("overflow", types.Types[TUNSAFEPTR]),
		makefield("oldoverflow", types.Types[TUNSAFEPTR]),
		makefield("startBucket", types.Types[TUINTPTR]),
		makefield("offset", types.Types[TUINT8]),
		makefield("wrapped", types.Types[TBOOL]),
		makefield("B", types.Types[TUINT8]),
		makefield("i", types.Types[TUINT8]),
		makefield("bucket", types.Types[TUINTPTR]),
		makefield("checkBucket", types.Types[TUINTPTR]),
	}

	// build iterator struct holding the above fields
	hiter := types.New(TSTRUCT)
	hiter.SetNoalg(true)
	hiter.SetFields(fields)
	dowidth(hiter)
	if hiter.Width != int64(12*Widthptr) {
		Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
	}
	t.MapType().Hiter = hiter
	hiter.StructType().Map = t
	return hiter
}
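
// A cross-check of the 12*Widthptr size asserted above (illustrative only):
// the eight pointer/unsafe.Pointer fields (key through oldoverflow) take
// eight words, startBucket, bucket, and checkBucket take three more, and
// the four byte-sized fields (offset, wrapped, B, i) pack into the
// remaining word (with padding on 64-bit), giving twelve words on both
// 32- and 64-bit systems.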

// methodfunc returns the function type corresponding to the method type f,
// inserting the receiver as an explicit first argument when receiver is
// non-nil.
func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
	inLen := f.Params().Fields().Len()
	if receiver != nil {
		inLen++
	}
	in := make([]*Node, 0, inLen)

	if receiver != nil {
		d := anonfield(receiver)
		in = append(in, d)
	}

	for _, t := range f.Params().Fields().Slice() {
		d := anonfield(t.Type)
		d.SetIsddd(t.Isddd())
		in = append(in, d)
	}

	outLen := f.Results().Fields().Len()
	out := make([]*Node, 0, outLen)
	for _, t := range f.Results().Fields().Slice() {
		d := anonfield(t.Type)
		out = append(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname() != nil {
		// Link to name of original method function.
		t.SetNname(f.Nname())
	}

	return t
}
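
// For example (an illustration, not extra behavior): given the method
//
//	func (r *T) M(x int, s ...string) error
//
// methodfunc(f, receiver-type *T) is func(*T, int, ...string) error, the
// signature with the receiver made an explicit first parameter, while
// methodfunc(f, nil) is func(int, ...string) error, the receiverless form
// stored as mtype in the Sig and written out to the reflect method table.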

// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *types.Type) []*Sig {
	// method type
	mt := methtype(t)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = types.NewPtr(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var ms []*Sig
	for _, f := range mt.AllMethods().Slice() {
		if f.Type.Etype != TFUNC || f.Type.Recv() == nil {
			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Type.Recv() == nil {
			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Nointerface() {
			continue
		}

		method := f.Sym
		if method == nil {
			break
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		if !isMethodApplicable(t, f) {
			continue
		}

		sig := &Sig{
			name:  method,
			isym:  methodSym(it, method),
			tsym:  methodSym(t, method),
			type_: methodfunc(f.Type, t),
			mtype: methodfunc(f.Type, nil),
		}
		ms = append(ms, sig)

		this := f.Type.Recv().Type

		if !sig.isym.Siggen() {
			sig.isym.SetSiggen(true)
			if !types.Identical(this, it) {
				genwrapper(it, f, sig.isym)
			}
		}

		if !sig.tsym.Siggen() {
			sig.tsym.SetSiggen(true)
			if !types.Identical(this, t) {
				genwrapper(t, f, sig.tsym)
			}
		}
	}

	return ms
}

// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *types.Type) []*Sig {
	var methods []*Sig
	for _, f := range t.Fields().Slice() {
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		if f.Sym.IsBlank() {
			Fatalf("unexpected blank symbol in interface method set")
		}
		if n := len(methods); n > 0 {
			last := methods[n-1]
			if !last.name.Less(f.Sym) {
				Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
			}
		}

		sig := &Sig{
			name:  f.Sym,
			mtype: f.Type,
			type_: methodfunc(f.Type, nil),
		}
		methods = append(methods, sig)

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym := methodSym(t, f.Sym)
		if !isym.Siggen() {
			isym.SetSiggen(true)
			genwrapper(t, f, isym)
		}
	}

	return methods
}

func dimportpath(p *types.Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	var str string
	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		str = myimportpath
	} else {
		str = p.Path
	}

	s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
	ot := dnameData(s, 0, str, "", nil, false)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	p.Pathsym = s
}

func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duintptr(s, ot, 0)
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptr(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptr(s, ot, pkg.Pathsym, 0)
}

// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duint32(s, ot, 0)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptrOff(s, ot, ns)
	}

	dimportpath(pkg)
	return dsymptrOff(s, ot, pkg.Pathsym)
}

// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
	if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
		Fatalf("package mismatch for %v", ft.Sym)
	}
	nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
	return dsymptr(lsym, ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	if pkg != nil {
		ot = dgopkgpathOff(s, ot, pkg)
	}

	return ot
}
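
// A worked example of the encoding above (illustrative only): for an
// exported name "Foo" with tag `json:"foo"` and nil pkg, the header byte
// is 1<<0|1<<1 = 0x03, followed by the big-endian name length 0x00 0x03,
// the bytes "Foo", the tag length 0x00 0x0a, and the ten tag bytes:
//
//	03 00 03 'F' 'o' 'o' 00 0a 'j' 's' 'o' 'n' ':' '"' 'f' 'o' 'o' '"'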

var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			if exported {
				sname += name + "." + tag
			} else {
				sname += name + "-" + tag
			}
		}
	} else {
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := Ctxt.Lookup(sname)
	if len(s.P) > 0 {
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
	m := methods(t)
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %v", t)
	}

	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOff(lsym, ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	}
	xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)
	}

	ot = duint16(lsym, ot, uint16(mcount))
	ot = duint16(lsym, ot, uint16(xcount))
	ot = duint32(lsym, ot, uint32(dataAdd))
	ot = duint32(lsym, ot, 0)
	return ot
}
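
// To make the header layout concrete (illustrative only): for a named type
// with methods {Len, Less, swap}, sorted by name with exported (uppercase)
// names first, dextratype writes a 4-byte pkgpath nameOff, mcount=3,
// xcount=2 (the boundary found by sort.Search above), the 4-byte offset to
// the method array, and 4 unused bytes - the 4+2+2+4+4 accounted for by
// uncommonSize.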

func typePkg(t *types.Type) *types.Pkg {
	tsym := t.Sym
	if tsym == nil {
		switch t.Etype {
		case TARRAY, TSLICE, TPTR, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := types.IsExported(a.name.Name)
		var pkg *types.Pkg
		if !exported && a.name.Pkg != typePkg(t) {
			pkg = a.name.Pkg
		}
		nsym := dname(a.name.Name, "", pkg, exported)

		ot = dsymptrOff(lsym, ot, nsym)
		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
	}
	return ot
}

func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
	duint32(s, ot, 0)
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = objabi.R_METHODOFF
	return ot + 4
}

var kinds = []int{
	TINT:        objabi.KindInt,
	TUINT:       objabi.KindUint,
	TINT8:       objabi.KindInt8,
	TUINT8:      objabi.KindUint8,
	TINT16:      objabi.KindInt16,
	TUINT16:     objabi.KindUint16,
	TINT32:      objabi.KindInt32,
	TUINT32:     objabi.KindUint32,
	TINT64:      objabi.KindInt64,
	TUINT64:     objabi.KindUint64,
	TUINTPTR:    objabi.KindUintptr,
	TFLOAT32:    objabi.KindFloat32,
	TFLOAT64:    objabi.KindFloat64,
	TBOOL:       objabi.KindBool,
	TSTRING:     objabi.KindString,
	TPTR:        objabi.KindPtr,
	TSTRUCT:     objabi.KindStruct,
	TINTER:      objabi.KindInterface,
	TCHAN:       objabi.KindChan,
	TMAP:        objabi.KindMap,
	TARRAY:      objabi.KindArray,
	TSLICE:      objabi.KindSlice,
	TFUNC:       objabi.KindFunc,
	TCOMPLEX64:  objabi.KindComplex64,
	TCOMPLEX128: objabi.KindComplex128,
	TUNSAFEPTR:  objabi.KindUnsafePointer,
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
	if !types.Haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		// Note: see comment in plive.go:onebitwalktype1.
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		var lastPtrField *types.Field
		for _, t1 := range t.Fields().Slice() {
			if types.Haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}
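
// For example (illustrative only), on a 64-bit system
//
//	typeptrdata(struct{ p *int; x int64; q *int; y int64 })
//
// finds q (offset 16) as the last pointer-bearing field and returns
// 16 + typeptrdata(*int) = 16 + 8 = 24: the trailing y is scalar data
// that the GC need not scan.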

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
	tflagNamed     = 1 << 2
)

var (
	algarray       *obj.LSym
	memhashvarlen  *obj.LSym
	memequalvarlen *obj.LSym
)

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, t *types.Type) int {
	sizeofAlg := 2 * Widthptr
	if algarray == nil {
		algarray = sysfunc("algarray")
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *obj.LSym
	if alg == ASPECIAL || alg == AMEM {
		algsym = dalgsym(t)
	}

	sptrWeak := true
	var sptr *obj.LSym
	if !t.IsPtr() || t.PtrBase != nil {
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
			sptrWeak = false
		}
		sptr = dtypesym(tptr)
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	// type rtype struct {
	//	size       uintptr
	//	ptrdata    uintptr
	//	hash       uint32
	//	tflag      tflag
	//	align      uint8
	//	fieldAlign uint8
	//	kind       uint8
	//	alg        *typeAlg
	//	gcdata     *byte
	//	str        nameOff
	//	ptrToThis  typeOff
	// }
	ot := 0
	ot = duintptr(lsym, ot, uint64(t.Width))
	ot = duintptr(lsym, ot, uint64(ptrdata))
	ot = duint32(lsym, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}

	exported := false
	p := t.LongString()
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = types.IsExported(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = types.IsExported(t.Elem().Sym.Name)
		}
	}

	ot = duint8(lsym, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(lsym, ot, t.Align) // align
	ot = duint8(lsym, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if !types.Haspointers(t) {
		i |= objabi.KindNoPointers
	}
	if isdirectiface(t) {
		i |= objabi.KindDirectIface
	}
	if useGCProg {
		i |= objabi.KindGCProg
	}
	ot = duint8(lsym, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(lsym, ot, algsym, 0)
	}
	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOff(lsym, ot, nsym) // str
	// ptrToThis
	if sptr == nil {
		ot = duint32(lsym, ot, 0)
	} else if sptrWeak {
		ot = dsymptrWeakOff(lsym, ot, sptr)
	} else {
		ot = dsymptrOff(lsym, ot, sptr)
	}

	return ot
}
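
// An illustration of the extra-star convention above: for a named type
// such as bytes.Buffer, the string written out is "*bytes.Buffer" with
// tflagExtraStar set, so the descriptors for bytes.Buffer and
// *bytes.Buffer reference the same name data, the former skipping the
// leading '*' when the string is decoded.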

// typeHasNoAlg returns whether t does not have any associated hash/eq
// algorithms because t, or some component of t, is marked Noalg.
func typeHasNoAlg(t *types.Type) bool {
	a, bad := algtype1(t)
	return a == ANOEQ && bad.Noalg()
}

func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	if typeHasNoAlg(t) {
		name = "noalg." + name
	}
	return name
}

// Fake package for runtime type info (headers)
// Don't access directly, use typeLookup below.
var (
	typepkgmu sync.Mutex // protects typepkg lookups
	typepkg   = types.NewPkg("type", "type")
)

func typeLookup(name string) *types.Sym {
	typepkgmu.Lock()
	s := typepkg.Lookup(name)
	typepkgmu.Unlock()
	return s
}

func typesym(t *types.Type) *types.Sym {
	return typeLookup(typesymname(t))
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym {
	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
}

func typesymprefix(prefix string, t *types.Type) *types.Sym {
	p := prefix + "." + t.ShortString()
	s := typeLookup(p)

	// This function is for looking up type-related generated functions
	// (e.g. eq and hash). Make sure they are indeed generated.
	signatmu.Lock()
	addsignat(t)
	signatmu.Unlock()

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

func typenamesym(t *types.Type) *types.Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typenamesym %v", t)
	}
	s := typesym(t)
	signatmu.Lock()
	addsignat(t)
	signatmu.Unlock()
	return s
}

func typename(t *types.Type) *Node {
	s := typenamesym(t)
	if s.Def == nil {
		n := newnamel(src.NoXPos, s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

func itabname(t, itype *types.Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
		Fatalf("itabname(%v, %v)", t, itype)
	}
	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
	if s.Def == nil {
		n := newname(s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
		itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
func isreflexive(t *types.Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR,
		TUNSAFEPTR,
		TSTRING,
		TCHAN:
		return true

	case TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER:
		return false

	case TARRAY:
		return isreflexive(t.Elem())

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			if !isreflexive(t1.Type) {
				return false
			}
		}
		return true

	default:
		Fatalf("bad type for map key: %v", t)
		return false
	}
}
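
// Floats (and types containing them) are non-reflexive because of NaN:
// math.NaN() != math.NaN(), so the runtime cannot assume a key equals
// itself; interfaces are non-reflexive because they may hold a float.
// For example, isreflexive reports false for map keys of type [4]float64
// or struct{ f float32 } (an illustration of the rules above).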

// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *types.Type) bool {
	switch t.Etype {
	case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32,
		TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN:
		return false

	case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0
		TINTER,
		TSTRING: // strings might have smaller backing stores
		return true

	case TARRAY:
		return needkeyupdate(t.Elem())

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			if needkeyupdate(t1.Type) {
				return true
			}
		}
		return false

	default:
		Fatalf("bad type for map key: %v", t)
		return true
	}
}

// formalType replaces byte and rune aliases with real types.
// They've been separate internally to make error messages
// better, but we have to merge them in the reflect tables.
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

func dtypesym(t *types.Type) *obj.LSym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	lsym := s.Linksym()
	if s.Siggen() {
		return lsym
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		dupok = obj.DUPOK
	}
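
	// Only the defining package emits the descriptor for a named type;
	// package runtime additionally emits the descriptors for the
	// predeclared types (int, float, error, etc.), which is why those
	// are exempted from the checks below.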
	if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
		// named types from other files are defined only by those files
		if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
			return lsym
		}
		// TODO(mdempsky): Investigate whether this can happen.
		if isforw[tbase.Etype] {
			return lsym
		}
	}

	ot := 0
	switch t.Etype {
	default:
		ot = dcommontype(lsym, t)
		ot = dextratype(lsym, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = duintptr(lsym, ot, uint64(t.NumElem()))
		ot = dextratype(lsym, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
		ot = dextratype(lsym, ot, t, 0)

	case TFUNC:
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.Isddd()
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(lsym, t)
		inCount := t.NumRecvs() + t.NumParams()
		outCount := t.NumResults()
		if isddd {
			outCount |= 1 << 15
		}
		ot = duint16(lsym, ot, uint16(inCount))
		ot = duint16(lsym, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		dataAdd := (inCount + t.NumResults()) * Widthptr
		ot = dextratype(lsym, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(lsym, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(lsym, ot, tpkg)

		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))
		dataAdd := imethodSize() * n
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := types.IsExported(a.name.Name)
			var pkg *types.Pkg
			if !exported && a.name.Pkg != tpkg {
				pkg = a.name.Pkg
			}
			nsym := dname(a.name.Name, "", pkg, exported)

			ot = dsymptrOff(lsym, ot, nsym)
			ot = dsymptrOff(lsym, ot, dtypesym(a.type_))
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Elem())
		s3 := dtypesym(bmap(t))
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dsymptr(lsym, ot, s2, 0)
		ot = dsymptr(lsym, ot, s3, 0)
		if t.Key().Width > MAXKEYSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Key().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		if t.Elem().Width > MAXVALSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Elem().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		ot = duint16(lsym, ot, uint16(bmap(t).Width))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
		ot = dextratype(lsym, ot, t, 0)

	case TPTR:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, t)
			ot = dextratype(lsym, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		fields := t.Fields().Slice()
		for _, t1 := range fields {
			dtypesym(t1.Type)
		}

		// All non-exported struct field names within a struct
		// type must originate from a single package. By
		// identifying and recording that package within the
		// struct type descriptor, we can omit that
		// information from the field descriptors.
		var spkg *types.Pkg
		for _, f := range fields {
			if !types.IsExported(f.Sym.Name) {
				spkg = f.Sym.Pkg
				break
			}
		}

		ot = dcommontype(lsym, t)
		ot = dgopkgpath(lsym, ot, spkg)
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(len(fields)))
		ot = duintptr(lsym, ot, uint64(len(fields)))

		dataAdd := len(fields) * structfieldSize()
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, f := range fields {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(lsym, ot, spkg, f)
			ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(lsym, ot, offsetAnon)
		}
	}

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	// Do not put Noalg types in typelinks. See issue #22605.
	if typeHasNoAlg(t) {
		keep = false
	}
	lsym.Set(obj.AttrMakeTypelink, keep)

	return lsym
}

// for each itabEntry, gather the methods on
// the concrete type that implement the interface
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.entries = methods
	}
}

// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	// TODO(mdempsky): Short circuit before calling methods(t)?
	// See discussion on CL 105039.
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	if len(sigs) != 0 {
		Fatalf("incomplete itab")
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}
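
// To illustrate the arithmetic above: the itab header is two pointers
// (inter, _type) plus a uint32 hash and 4 bytes of padding, so on a
// 64-bit system fun[0] lives at offset 2*8+8 = 24, and an offset of 32
// selects methodnum (32-24)/8 = 1, the second method in sorted order.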

// addsignat ensures that a runtime type descriptor is emitted for t.
func addsignat(t *types.Type) {
	if _, ok := signatset[t]; !ok {
		signatset[t] = struct{}{}
		signatslice = append(signatslice, t)
	}
}

func addsignats(dcls []*Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}
}

func dumpsignats() {
	// Process signatset. Use a loop, as dtypesym adds
	// entries to signatset while it is being processed.
	signats := make([]typeAndStr, len(signatslice))
	for len(signatslice) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for _, t := range signatslice {
			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
			delete(signatset, t)
		}
		signatslice = signatslice[:0]
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			if t.Sym != nil {
				dtypesym(types.NewPtr(t))
			}
		}
	}
}

func dumptabs() {
	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//	inter *interfacetype
		//	_type *_type
		//	hash  uint32
		//	_     [4]byte
		//	fun   [1]uintptr // variable sized
		// }
		o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
		o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
		o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
		o += 4                                // skip unused field
		for _, fn := range genfun(i.t, i.itype) {
			o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
		}
		// Nothing writes static itabs, so they are read only.
		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
		dsymptr(ilink, 0, i.lsym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs")
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//	name nameOff
			//	typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOff(s, ot, nsym)
			ot = dsymptrOff(s, ot, dtypesym(p.t))
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports")
		for _, p := range ptabs {
			ot = dsymptr(s, ot, p.s.Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))
	}
}

func dumpimportstrings() {
	// generate import strings for imported packages
	for _, p := range types.ImportedPkgList() {
		dimportpath(p)
	}
}

func dumpbasictypes() {
	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in object files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(types.NewPkg("main", ""))
	}
}

type typeAndStr struct {
	t       *types.Type
	short   string
	regular string
}

type typesByString []typeAndStr

func (a typesByString) Len() int { return len(a) }
func (a typesByString) Less(i, j int) bool {
	if a[i].short != a[j].short {
		return a[i].short < a[j].short
	}
	// When the only difference between the types is whether
	// they refer to byte or uint8, such as **byte vs **uint8,
	// the types' ShortStrings can be identical.
	// To preserve deterministic sort ordering, sort these by String().
	return a[i].regular < a[j].regular
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

func dalgsym(t *types.Type) *obj.LSym {
	var lsym *obj.LSym
	var hashfunc *obj.LSym
	var eqfunc *obj.LSym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s := typeLookup(p)
		lsym = s.Linksym()
		if s.AlgGen() {
			return lsym
		}
		s.SetAlgGen(true)

		if memhashvarlen == nil {
			memhashvarlen = sysfunc("memhash_varlen")
			memequalvarlen = sysfunc("memequal_varlen")
		}

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = typeLookup(p).Linksym()

		ot := 0
		ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
		ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = typeLookup(p).Linksym()

		ot = 0
		ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
		ot = duintptr(eqfunc, ot, uint64(t.Width))
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s := typesymprefix(".alg", t)
		lsym = s.Linksym()

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t).Linksym()
		eqfunc = typesymprefix(".eqfunc", t).Linksym()

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash.Linksym(), 0)
		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq.Linksym(), 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(lsym, ot, hashfunc, 0)
	ot = dsymptr(lsym, ot, eqfunc, 0)
	ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
	return lsym
}
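
// As an illustration of the AMEM sharing above: two unrelated 24-byte
// memory-comparable types such as [3]uint64 and struct{ a, b, c uint64 }
// both resolve to the shared symbols .alg24, .hashfunc24, and .eqfunc24,
// whose closures pass the size 24 to memhash_varlen/memequal_varlen.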

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program,
// which can express repetition compactly. In either form, the
// information is used by the runtime to initialize the heap bitmap,
// and for large types (like 128 or more words), they are roughly the
// same speed. GC programs are never much larger and often more
// compact. (If large arrays are involved, they can be arbitrarily
// more compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		lsym = dgcptrmask(t)
		return
	}

	useGCProg = true
	lsym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *obj.LSym {
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Runtimepkg.Lookup(p)
	lsym := sym.Linksym()
	if !sym.Uniq() {
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(lsym, i, x)
		}
		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return lsym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	vec := bvalloc(8 * int32(len(ptrmask)))
	onebitwalktype1(t, 0, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}
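
// A small worked example (illustrative only): on a 64-bit system,
// struct{ p *int; x uintptr; q *int } spans three words with pointers
// in words 0 and 2, so dgcptrmask produces the single mask byte
// 0b00000101 = 0x05 and names the shared symbol gcbits.05.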

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	lsym := typesymprefix(".gcprog", t).Linksym()
	var p GCProg
	p.init(lsym)
	p.emit(t, 0)
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return lsym, offset
}

type GCProg struct {
	lsym   *obj.LSym
	symoff int
	w      gcprog.Writer
}

var Debug_gcprog int // set by -d gcprog

func (p *GCProg) init(lsym *obj.LSym) {
	p.lsym = lsym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
		p.w.Debug(os.Stderr)
	}
}

func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.lsym, p.symoff, x)
}

func (p *GCProg) end() {
	p.w.End()
	duint32(p.lsym, 0, uint32(p.symoff-4))
	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
	}
}

func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map value too big %d", size)
	}
	if zerosize < size {
		zerosize = size
	}
	s := mappkg.Lookup("zero")
	if s.Def == nil {
		x := newname(s)
		x.Type = types.Types[TUINT8]
		x.SetClass(PEXTERN)
		x.SetTypecheck(1)
		s.Def = asTypesNode(x)
	}
	z := nod(OADDR, asNode(s.Def), nil)
	z.Type = types.NewPtr(types.Types[TUINT8])
	z.SetAddable(true)
	z.SetTypecheck(1)
	return z
}