github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/cmd/compile/internal/gc/reflect.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package gc 6 7 import ( 8 "cmd/compile/internal/types" 9 "cmd/internal/gcprog" 10 "cmd/internal/obj" 11 "cmd/internal/objabi" 12 "cmd/internal/src" 13 "fmt" 14 "os" 15 "sort" 16 "strings" 17 "sync" 18 ) 19 20 type itabEntry struct { 21 t, itype *types.Type 22 lsym *obj.LSym // symbol of the itab itself 23 24 // symbols of each method in 25 // the itab, sorted by byte offset; 26 // filled in by peekitabs 27 entries []*obj.LSym 28 } 29 30 type ptabEntry struct { 31 s *types.Sym 32 t *types.Type 33 } 34 35 // runtime interface and reflection data structures 36 var ( 37 signatmu sync.Mutex // protects signatset and signatslice 38 signatset = make(map[*types.Type]struct{}) 39 signatslice []*types.Type 40 41 itabs []itabEntry 42 ptabs []ptabEntry 43 ) 44 45 type Sig struct { 46 name *types.Sym 47 isym *types.Sym 48 tsym *types.Sym 49 type_ *types.Type 50 mtype *types.Type 51 } 52 53 // Builds a type representing a Bucket structure for 54 // the given map type. This type is not visible to users - 55 // we include only enough information to generate a correct GC 56 // program for it. 57 // Make sure this stays in sync with runtime/map.go. 58 const ( 59 BUCKETSIZE = 8 60 MAXKEYSIZE = 128 61 MAXVALSIZE = 128 62 ) 63 64 func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) 65 func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) 66 67 func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) 68 if t.Sym == nil && len(methods(t)) == 0 { 69 return 0 70 } 71 return 4 + 2 + 2 + 4 + 4 72 } 73 74 func makefield(name string, t *types.Type) *types.Field { 75 f := types.NewField() 76 f.Type = t 77 f.Sym = (*types.Pkg)(nil).Lookup(name) 78 return f 79 } 80 81 // bmap makes the map bucket type given the type of the map. 82 func bmap(t *types.Type) *types.Type { 83 if t.MapType().Bucket != nil { 84 return t.MapType().Bucket 85 } 86 87 bucket := types.New(TSTRUCT) 88 keytype := t.Key() 89 valtype := t.Elem() 90 dowidth(keytype) 91 dowidth(valtype) 92 if keytype.Width > MAXKEYSIZE { 93 keytype = types.NewPtr(keytype) 94 } 95 if valtype.Width > MAXVALSIZE { 96 valtype = types.NewPtr(valtype) 97 } 98 99 field := make([]*types.Field, 0, 5) 100 101 // The first field is: uint8 topbits[BUCKETSIZE]. 102 arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE) 103 field = append(field, makefield("topbits", arr)) 104 105 arr = types.NewArray(keytype, BUCKETSIZE) 106 arr.SetNoalg(true) 107 keys := makefield("keys", arr) 108 field = append(field, keys) 109 110 arr = types.NewArray(valtype, BUCKETSIZE) 111 arr.SetNoalg(true) 112 values := makefield("values", arr) 113 field = append(field, values) 114 115 // Make sure the overflow pointer is the last memory in the struct, 116 // because the runtime assumes it can use size-ptrSize as the 117 // offset of the overflow pointer. We double-check that property 118 // below once the offsets and size are computed. 119 // 120 // BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point. 121 // On 32-bit systems, the max alignment is 32-bit, and the 122 // overflow pointer will add another 32-bit field, and the struct 123 // will end with no padding. 124 // On 64-bit systems, the max alignment is 64-bit, and the 125 // overflow pointer will add another 64-bit field, and the struct 126 // will end with no padding. 127 // On nacl/amd64p32, however, the max alignment is 64-bit, 128 // but the overflow pointer will add only a 32-bit field, 129 // so if the struct needs 64-bit padding (because a key or value does) 130 // then it would end with an extra 32-bit padding field. 131 // Preempt that by emitting the padding here. 132 if int(valtype.Align) > Widthptr || int(keytype.Align) > Widthptr { 133 field = append(field, makefield("pad", types.Types[TUINTPTR])) 134 } 135 136 // If keys and values have no pointers, the map implementation 137 // can keep a list of overflow pointers on the side so that 138 // buckets can be marked as having no pointers. 139 // Arrange for the bucket to have no pointers by changing 140 // the type of the overflow field to uintptr in this case. 141 // See comment on hmap.overflow in runtime/map.go. 142 otyp := types.NewPtr(bucket) 143 if !types.Haspointers(valtype) && !types.Haspointers(keytype) { 144 otyp = types.Types[TUINTPTR] 145 } 146 overflow := makefield("overflow", otyp) 147 field = append(field, overflow) 148 149 // link up fields 150 bucket.SetNoalg(true) 151 bucket.SetFields(field[:]) 152 dowidth(bucket) 153 154 // Check invariants that map code depends on. 155 if !IsComparable(t.Key()) { 156 Fatalf("unsupported map key type for %v", t) 157 } 158 if BUCKETSIZE < 8 { 159 Fatalf("bucket size too small for proper alignment") 160 } 161 if keytype.Align > BUCKETSIZE { 162 Fatalf("key align too big for %v", t) 163 } 164 if valtype.Align > BUCKETSIZE { 165 Fatalf("value align too big for %v", t) 166 } 167 if keytype.Width > MAXKEYSIZE { 168 Fatalf("key size to large for %v", t) 169 } 170 if valtype.Width > MAXVALSIZE { 171 Fatalf("value size to large for %v", t) 172 } 173 if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() { 174 Fatalf("key indirect incorrect for %v", t) 175 } 176 if t.Elem().Width > MAXVALSIZE && !valtype.IsPtr() { 177 Fatalf("value indirect incorrect for %v", t) 178 } 179 if keytype.Width%int64(keytype.Align) != 0 { 180 Fatalf("key size not a multiple of key align for %v", t) 181 } 182 if valtype.Width%int64(valtype.Align) != 0 { 183 Fatalf("value size not a multiple of value align for %v", t) 184 } 185 if bucket.Align%keytype.Align != 0 { 186 Fatalf("bucket align not multiple of key align %v", t) 187 } 188 if bucket.Align%valtype.Align != 0 { 189 Fatalf("bucket align not multiple of value align %v", t) 190 } 191 if keys.Offset%int64(keytype.Align) != 0 { 192 Fatalf("bad alignment of keys in bmap for %v", t) 193 } 194 if values.Offset%int64(valtype.Align) != 0 { 195 Fatalf("bad alignment of values in bmap for %v", t) 196 } 197 198 // Double-check that overflow field is final memory in struct, 199 // with no padding at end. See comment above. 200 if overflow.Offset != bucket.Width-int64(Widthptr) { 201 Fatalf("bad offset of overflow in bmap for %v", t) 202 } 203 204 t.MapType().Bucket = bucket 205 206 bucket.StructType().Map = t 207 return bucket 208 } 209 210 // hmap builds a type representing a Hmap structure for the given map type. 211 // Make sure this stays in sync with runtime/map.go. 212 func hmap(t *types.Type) *types.Type { 213 if t.MapType().Hmap != nil { 214 return t.MapType().Hmap 215 } 216 217 bmap := bmap(t) 218 219 // build a struct: 220 // type hmap struct { 221 // count int 222 // flags uint8 223 // B uint8 224 // noverflow uint16 225 // hash0 uint32 226 // buckets *bmap 227 // oldbuckets *bmap 228 // nevacuate uintptr 229 // extra unsafe.Pointer // *mapextra 230 // } 231 // must match runtime/map.go:hmap. 232 fields := []*types.Field{ 233 makefield("count", types.Types[TINT]), 234 makefield("flags", types.Types[TUINT8]), 235 makefield("B", types.Types[TUINT8]), 236 makefield("noverflow", types.Types[TUINT16]), 237 makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP. 238 makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP. 239 makefield("oldbuckets", types.NewPtr(bmap)), 240 makefield("nevacuate", types.Types[TUINTPTR]), 241 makefield("extra", types.Types[TUNSAFEPTR]), 242 } 243 244 hmap := types.New(TSTRUCT) 245 hmap.SetNoalg(true) 246 hmap.SetFields(fields) 247 dowidth(hmap) 248 249 // The size of hmap should be 48 bytes on 64 bit 250 // and 28 bytes on 32 bit platforms. 251 if size := int64(8 + 5*Widthptr); hmap.Width != size { 252 Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size) 253 } 254 255 t.MapType().Hmap = hmap 256 hmap.StructType().Map = t 257 return hmap 258 } 259 260 // hiter builds a type representing an Hiter structure for the given map type. 261 // Make sure this stays in sync with runtime/map.go. 262 func hiter(t *types.Type) *types.Type { 263 if t.MapType().Hiter != nil { 264 return t.MapType().Hiter 265 } 266 267 hmap := hmap(t) 268 bmap := bmap(t) 269 270 // build a struct: 271 // type hiter struct { 272 // key *Key 273 // val *Value 274 // t unsafe.Pointer // *MapType 275 // h *hmap 276 // buckets *bmap 277 // bptr *bmap 278 // overflow unsafe.Pointer // *[]*bmap 279 // oldoverflow unsafe.Pointer // *[]*bmap 280 // startBucket uintptr 281 // offset uint8 282 // wrapped bool 283 // B uint8 284 // i uint8 285 // bucket uintptr 286 // checkBucket uintptr 287 // } 288 // must match runtime/map.go:hiter. 289 fields := []*types.Field{ 290 makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP. 291 makefield("val", types.NewPtr(t.Elem())), // Used in range.go for TMAP. 292 makefield("t", types.Types[TUNSAFEPTR]), 293 makefield("h", types.NewPtr(hmap)), 294 makefield("buckets", types.NewPtr(bmap)), 295 makefield("bptr", types.NewPtr(bmap)), 296 makefield("overflow", types.Types[TUNSAFEPTR]), 297 makefield("oldoverflow", types.Types[TUNSAFEPTR]), 298 makefield("startBucket", types.Types[TUINTPTR]), 299 makefield("offset", types.Types[TUINT8]), 300 makefield("wrapped", types.Types[TBOOL]), 301 makefield("B", types.Types[TUINT8]), 302 makefield("i", types.Types[TUINT8]), 303 makefield("bucket", types.Types[TUINTPTR]), 304 makefield("checkBucket", types.Types[TUINTPTR]), 305 } 306 307 // build iterator struct holding the above fields 308 hiter := types.New(TSTRUCT) 309 hiter.SetNoalg(true) 310 hiter.SetFields(fields) 311 dowidth(hiter) 312 if hiter.Width != int64(12*Widthptr) { 313 Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) 314 } 315 t.MapType().Hiter = hiter 316 hiter.StructType().Map = t 317 return hiter 318 } 319 320 // f is method type, with receiver. 321 // return function type, receiver as first argument (or not). 322 func methodfunc(f *types.Type, receiver *types.Type) *types.Type { 323 inLen := f.Params().Fields().Len() 324 if receiver != nil { 325 inLen++ 326 } 327 in := make([]*Node, 0, inLen) 328 329 if receiver != nil { 330 d := anonfield(receiver) 331 in = append(in, d) 332 } 333 334 for _, t := range f.Params().Fields().Slice() { 335 d := anonfield(t.Type) 336 d.SetIsDDD(t.IsDDD()) 337 in = append(in, d) 338 } 339 340 outLen := f.Results().Fields().Len() 341 out := make([]*Node, 0, outLen) 342 for _, t := range f.Results().Fields().Slice() { 343 d := anonfield(t.Type) 344 out = append(out, d) 345 } 346 347 t := functype(nil, in, out) 348 if f.Nname() != nil { 349 // Link to name of original method function. 350 t.SetNname(f.Nname()) 351 } 352 353 return t 354 } 355 356 // methods returns the methods of the non-interface type t, sorted by name. 357 // Generates stub functions as needed. 358 func methods(t *types.Type) []*Sig { 359 // method type 360 mt := methtype(t) 361 362 if mt == nil { 363 return nil 364 } 365 expandmeth(mt) 366 367 // type stored in interface word 368 it := t 369 370 if !isdirectiface(it) { 371 it = types.NewPtr(t) 372 } 373 374 // make list of methods for t, 375 // generating code if necessary. 376 var ms []*Sig 377 for _, f := range mt.AllMethods().Slice() { 378 if f.Type.Etype != TFUNC || f.Type.Recv() == nil { 379 Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) 380 } 381 if f.Type.Recv() == nil { 382 Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) 383 } 384 if f.Nointerface() { 385 continue 386 } 387 388 method := f.Sym 389 if method == nil { 390 break 391 } 392 393 // get receiver type for this particular method. 394 // if pointer receiver but non-pointer t and 395 // this is not an embedded pointer inside a struct, 396 // method does not apply. 397 if !isMethodApplicable(t, f) { 398 continue 399 } 400 401 sig := &Sig{ 402 name: method, 403 isym: methodSym(it, method), 404 tsym: methodSym(t, method), 405 type_: methodfunc(f.Type, t), 406 mtype: methodfunc(f.Type, nil), 407 } 408 ms = append(ms, sig) 409 410 this := f.Type.Recv().Type 411 412 if !sig.isym.Siggen() { 413 sig.isym.SetSiggen(true) 414 if !types.Identical(this, it) { 415 genwrapper(it, f, sig.isym) 416 } 417 } 418 419 if !sig.tsym.Siggen() { 420 sig.tsym.SetSiggen(true) 421 if !types.Identical(this, t) { 422 genwrapper(t, f, sig.tsym) 423 } 424 } 425 } 426 427 return ms 428 } 429 430 // imethods returns the methods of the interface type t, sorted by name. 431 func imethods(t *types.Type) []*Sig { 432 var methods []*Sig 433 for _, f := range t.Fields().Slice() { 434 if f.Type.Etype != TFUNC || f.Sym == nil { 435 continue 436 } 437 if f.Sym.IsBlank() { 438 Fatalf("unexpected blank symbol in interface method set") 439 } 440 if n := len(methods); n > 0 { 441 last := methods[n-1] 442 if !last.name.Less(f.Sym) { 443 Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym) 444 } 445 } 446 447 sig := &Sig{ 448 name: f.Sym, 449 mtype: f.Type, 450 type_: methodfunc(f.Type, nil), 451 } 452 methods = append(methods, sig) 453 454 // NOTE(rsc): Perhaps an oversight that 455 // IfaceType.Method is not in the reflect data. 456 // Generate the method body, so that compiled 457 // code can refer to it. 458 isym := methodSym(t, f.Sym) 459 if !isym.Siggen() { 460 isym.SetSiggen(true) 461 genwrapper(t, f, isym) 462 } 463 } 464 465 return methods 466 } 467 468 func dimportpath(p *types.Pkg) { 469 if p.Pathsym != nil { 470 return 471 } 472 473 // If we are compiling the runtime package, there are two runtime packages around 474 // -- localpkg and Runtimepkg. We don't want to produce import path symbols for 475 // both of them, so just produce one for localpkg. 476 if myimportpath == "runtime" && p == Runtimepkg { 477 return 478 } 479 480 var str string 481 if p == localpkg { 482 // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. 483 str = myimportpath 484 } else { 485 str = p.Path 486 } 487 488 s := Ctxt.Lookup("type..importpath." + p.Prefix + ".") 489 ot := dnameData(s, 0, str, "", nil, false) 490 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 491 p.Pathsym = s 492 } 493 494 func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { 495 if pkg == nil { 496 return duintptr(s, ot, 0) 497 } 498 499 if pkg == localpkg && myimportpath == "" { 500 // If we don't know the full import path of the package being compiled 501 // (i.e. -p was not passed on the compiler command line), emit a reference to 502 // type..importpath.""., which the linker will rewrite using the correct import path. 503 // Every package that imports this one directly defines the symbol. 504 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 505 ns := Ctxt.Lookup(`type..importpath."".`) 506 return dsymptr(s, ot, ns, 0) 507 } 508 509 dimportpath(pkg) 510 return dsymptr(s, ot, pkg.Pathsym, 0) 511 } 512 513 // dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol. 514 func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { 515 if pkg == nil { 516 return duint32(s, ot, 0) 517 } 518 if pkg == localpkg && myimportpath == "" { 519 // If we don't know the full import path of the package being compiled 520 // (i.e. -p was not passed on the compiler command line), emit a reference to 521 // type..importpath.""., which the linker will rewrite using the correct import path. 522 // Every package that imports this one directly defines the symbol. 523 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 524 ns := Ctxt.Lookup(`type..importpath."".`) 525 return dsymptrOff(s, ot, ns) 526 } 527 528 dimportpath(pkg) 529 return dsymptrOff(s, ot, pkg.Pathsym) 530 } 531 532 // dnameField dumps a reflect.name for a struct field. 533 func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int { 534 if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg { 535 Fatalf("package mismatch for %v", ft.Sym) 536 } 537 nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name)) 538 return dsymptr(lsym, ot, nsym, 0) 539 } 540 541 // dnameData writes the contents of a reflect.name into s at offset ot. 542 func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int { 543 if len(name) > 1<<16-1 { 544 Fatalf("name too long: %s", name) 545 } 546 if len(tag) > 1<<16-1 { 547 Fatalf("tag too long: %s", tag) 548 } 549 550 // Encode name and tag. See reflect/type.go for details. 551 var bits byte 552 l := 1 + 2 + len(name) 553 if exported { 554 bits |= 1 << 0 555 } 556 if len(tag) > 0 { 557 l += 2 + len(tag) 558 bits |= 1 << 1 559 } 560 if pkg != nil { 561 bits |= 1 << 2 562 } 563 b := make([]byte, l) 564 b[0] = bits 565 b[1] = uint8(len(name) >> 8) 566 b[2] = uint8(len(name)) 567 copy(b[3:], name) 568 if len(tag) > 0 { 569 tb := b[3+len(name):] 570 tb[0] = uint8(len(tag) >> 8) 571 tb[1] = uint8(len(tag)) 572 copy(tb[2:], tag) 573 } 574 575 ot = int(s.WriteBytes(Ctxt, int64(ot), b)) 576 577 if pkg != nil { 578 ot = dgopkgpathOff(s, ot, pkg) 579 } 580 581 return ot 582 } 583 584 var dnameCount int 585 586 // dname creates a reflect.name for a struct field or method. 587 func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym { 588 // Write out data as "type.." to signal two things to the 589 // linker, first that when dynamically linking, the symbol 590 // should be moved to a relro section, and second that the 591 // contents should not be decoded as a type. 592 sname := "type..namedata." 593 if pkg == nil { 594 // In the common case, share data with other packages. 595 if name == "" { 596 if exported { 597 sname += "-noname-exported." + tag 598 } else { 599 sname += "-noname-unexported." + tag 600 } 601 } else { 602 if exported { 603 sname += name + "." + tag 604 } else { 605 sname += name + "-" + tag 606 } 607 } 608 } else { 609 sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount) 610 dnameCount++ 611 } 612 s := Ctxt.Lookup(sname) 613 if len(s.P) > 0 { 614 return s 615 } 616 ot := dnameData(s, 0, name, tag, pkg, exported) 617 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 618 return s 619 } 620 621 // dextratype dumps the fields of a runtime.uncommontype. 622 // dataAdd is the offset in bytes after the header where the 623 // backing array of the []method field is written (by dextratypeData). 624 func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { 625 m := methods(t) 626 if t.Sym == nil && len(m) == 0 { 627 return ot 628 } 629 noff := int(Rnd(int64(ot), int64(Widthptr))) 630 if noff != ot { 631 Fatalf("unexpected alignment in dextratype for %v", t) 632 } 633 634 for _, a := range m { 635 dtypesym(a.type_) 636 } 637 638 ot = dgopkgpathOff(lsym, ot, typePkg(t)) 639 640 dataAdd += uncommonSize(t) 641 mcount := len(m) 642 if mcount != int(uint16(mcount)) { 643 Fatalf("too many methods on %v: %d", t, mcount) 644 } 645 xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) }) 646 if dataAdd != int(uint32(dataAdd)) { 647 Fatalf("methods are too far away on %v: %d", t, dataAdd) 648 } 649 650 ot = duint16(lsym, ot, uint16(mcount)) 651 ot = duint16(lsym, ot, uint16(xcount)) 652 ot = duint32(lsym, ot, uint32(dataAdd)) 653 ot = duint32(lsym, ot, 0) 654 return ot 655 } 656 657 func typePkg(t *types.Type) *types.Pkg { 658 tsym := t.Sym 659 if tsym == nil { 660 switch t.Etype { 661 case TARRAY, TSLICE, TPTR, TCHAN: 662 if t.Elem() != nil { 663 tsym = t.Elem().Sym 664 } 665 } 666 } 667 if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype { 668 return tsym.Pkg 669 } 670 return nil 671 } 672 673 // dextratypeData dumps the backing array for the []method field of 674 // runtime.uncommontype. 675 func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int { 676 for _, a := range methods(t) { 677 // ../../../../runtime/type.go:/method 678 exported := types.IsExported(a.name.Name) 679 var pkg *types.Pkg 680 if !exported && a.name.Pkg != typePkg(t) { 681 pkg = a.name.Pkg 682 } 683 nsym := dname(a.name.Name, "", pkg, exported) 684 685 ot = dsymptrOff(lsym, ot, nsym) 686 ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype)) 687 ot = dmethodptrOff(lsym, ot, a.isym.Linksym()) 688 ot = dmethodptrOff(lsym, ot, a.tsym.Linksym()) 689 } 690 return ot 691 } 692 693 func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int { 694 duint32(s, ot, 0) 695 r := obj.Addrel(s) 696 r.Off = int32(ot) 697 r.Siz = 4 698 r.Sym = x 699 r.Type = objabi.R_METHODOFF 700 return ot + 4 701 } 702 703 var kinds = []int{ 704 TINT: objabi.KindInt, 705 TUINT: objabi.KindUint, 706 TINT8: objabi.KindInt8, 707 TUINT8: objabi.KindUint8, 708 TINT16: objabi.KindInt16, 709 TUINT16: objabi.KindUint16, 710 TINT32: objabi.KindInt32, 711 TUINT32: objabi.KindUint32, 712 TINT64: objabi.KindInt64, 713 TUINT64: objabi.KindUint64, 714 TUINTPTR: objabi.KindUintptr, 715 TFLOAT32: objabi.KindFloat32, 716 TFLOAT64: objabi.KindFloat64, 717 TBOOL: objabi.KindBool, 718 TSTRING: objabi.KindString, 719 TPTR: objabi.KindPtr, 720 TSTRUCT: objabi.KindStruct, 721 TINTER: objabi.KindInterface, 722 TCHAN: objabi.KindChan, 723 TMAP: objabi.KindMap, 724 TARRAY: objabi.KindArray, 725 TSLICE: objabi.KindSlice, 726 TFUNC: objabi.KindFunc, 727 TCOMPLEX64: objabi.KindComplex64, 728 TCOMPLEX128: objabi.KindComplex128, 729 TUNSAFEPTR: objabi.KindUnsafePointer, 730 } 731 732 // typeptrdata returns the length in bytes of the prefix of t 733 // containing pointer data. Anything after this offset is scalar data. 734 func typeptrdata(t *types.Type) int64 { 735 if !types.Haspointers(t) { 736 return 0 737 } 738 739 switch t.Etype { 740 case TPTR, 741 TUNSAFEPTR, 742 TFUNC, 743 TCHAN, 744 TMAP: 745 return int64(Widthptr) 746 747 case TSTRING: 748 // struct { byte *str; intgo len; } 749 return int64(Widthptr) 750 751 case TINTER: 752 // struct { Itab *tab; void *data; } or 753 // struct { Type *type; void *data; } 754 // Note: see comment in plive.go:onebitwalktype1. 755 return 2 * int64(Widthptr) 756 757 case TSLICE: 758 // struct { byte *array; uintgo len; uintgo cap; } 759 return int64(Widthptr) 760 761 case TARRAY: 762 // haspointers already eliminated t.NumElem() == 0. 763 return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem()) 764 765 case TSTRUCT: 766 // Find the last field that has pointers. 767 var lastPtrField *types.Field 768 for _, t1 := range t.Fields().Slice() { 769 if types.Haspointers(t1.Type) { 770 lastPtrField = t1 771 } 772 } 773 return lastPtrField.Offset + typeptrdata(lastPtrField.Type) 774 775 default: 776 Fatalf("typeptrdata: unexpected type, %v", t) 777 return 0 778 } 779 } 780 781 // tflag is documented in reflect/type.go. 782 // 783 // tflag values must be kept in sync with copies in: 784 // cmd/compile/internal/gc/reflect.go 785 // cmd/link/internal/ld/decodesym.go 786 // reflect/type.go 787 // runtime/type.go 788 const ( 789 tflagUncommon = 1 << 0 790 tflagExtraStar = 1 << 1 791 tflagNamed = 1 << 2 792 ) 793 794 var ( 795 algarray *obj.LSym 796 memhashvarlen *obj.LSym 797 memequalvarlen *obj.LSym 798 ) 799 800 // dcommontype dumps the contents of a reflect.rtype (runtime._type). 801 func dcommontype(lsym *obj.LSym, t *types.Type) int { 802 sizeofAlg := 2 * Widthptr 803 if algarray == nil { 804 algarray = sysvar("algarray") 805 } 806 dowidth(t) 807 alg := algtype(t) 808 var algsym *obj.LSym 809 if alg == ASPECIAL || alg == AMEM { 810 algsym = dalgsym(t) 811 } 812 813 sptrWeak := true 814 var sptr *obj.LSym 815 if !t.IsPtr() || t.IsPtrElem() { 816 tptr := types.NewPtr(t) 817 if t.Sym != nil || methods(tptr) != nil { 818 sptrWeak = false 819 } 820 sptr = dtypesym(tptr) 821 } 822 823 gcsym, useGCProg, ptrdata := dgcsym(t) 824 825 // ../../../../reflect/type.go:/^type.rtype 826 // actual type structure 827 // type rtype struct { 828 // size uintptr 829 // ptrdata uintptr 830 // hash uint32 831 // tflag tflag 832 // align uint8 833 // fieldAlign uint8 834 // kind uint8 835 // alg *typeAlg 836 // gcdata *byte 837 // str nameOff 838 // ptrToThis typeOff 839 // } 840 ot := 0 841 ot = duintptr(lsym, ot, uint64(t.Width)) 842 ot = duintptr(lsym, ot, uint64(ptrdata)) 843 ot = duint32(lsym, ot, typehash(t)) 844 845 var tflag uint8 846 if uncommonSize(t) != 0 { 847 tflag |= tflagUncommon 848 } 849 if t.Sym != nil && t.Sym.Name != "" { 850 tflag |= tflagNamed 851 } 852 853 exported := false 854 p := t.LongString() 855 // If we're writing out type T, 856 // we are very likely to write out type *T as well. 857 // Use the string "*T"[1:] for "T", so that the two 858 // share storage. This is a cheap way to reduce the 859 // amount of space taken up by reflect strings. 860 if !strings.HasPrefix(p, "*") { 861 p = "*" + p 862 tflag |= tflagExtraStar 863 if t.Sym != nil { 864 exported = types.IsExported(t.Sym.Name) 865 } 866 } else { 867 if t.Elem() != nil && t.Elem().Sym != nil { 868 exported = types.IsExported(t.Elem().Sym.Name) 869 } 870 } 871 872 ot = duint8(lsym, ot, tflag) 873 874 // runtime (and common sense) expects alignment to be a power of two. 875 i := int(t.Align) 876 877 if i == 0 { 878 i = 1 879 } 880 if i&(i-1) != 0 { 881 Fatalf("invalid alignment %d for %v", t.Align, t) 882 } 883 ot = duint8(lsym, ot, t.Align) // align 884 ot = duint8(lsym, ot, t.Align) // fieldAlign 885 886 i = kinds[t.Etype] 887 if !types.Haspointers(t) { 888 i |= objabi.KindNoPointers 889 } 890 if isdirectiface(t) { 891 i |= objabi.KindDirectIface 892 } 893 if useGCProg { 894 i |= objabi.KindGCProg 895 } 896 ot = duint8(lsym, ot, uint8(i)) // kind 897 if algsym == nil { 898 ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg) 899 } else { 900 ot = dsymptr(lsym, ot, algsym, 0) 901 } 902 ot = dsymptr(lsym, ot, gcsym, 0) // gcdata 903 904 nsym := dname(p, "", nil, exported) 905 ot = dsymptrOff(lsym, ot, nsym) // str 906 // ptrToThis 907 if sptr == nil { 908 ot = duint32(lsym, ot, 0) 909 } else if sptrWeak { 910 ot = dsymptrWeakOff(lsym, ot, sptr) 911 } else { 912 ot = dsymptrOff(lsym, ot, sptr) 913 } 914 915 return ot 916 } 917 918 // typeHasNoAlg reports whether t does not have any associated hash/eq 919 // algorithms because t, or some component of t, is marked Noalg. 920 func typeHasNoAlg(t *types.Type) bool { 921 a, bad := algtype1(t) 922 return a == ANOEQ && bad.Noalg() 923 } 924 925 func typesymname(t *types.Type) string { 926 name := t.ShortString() 927 // Use a separate symbol name for Noalg types for #17752. 928 if typeHasNoAlg(t) { 929 name = "noalg." + name 930 } 931 return name 932 } 933 934 // Fake package for runtime type info (headers) 935 // Don't access directly, use typeLookup below. 936 var ( 937 typepkgmu sync.Mutex // protects typepkg lookups 938 typepkg = types.NewPkg("type", "type") 939 ) 940 941 func typeLookup(name string) *types.Sym { 942 typepkgmu.Lock() 943 s := typepkg.Lookup(name) 944 typepkgmu.Unlock() 945 return s 946 } 947 948 func typesym(t *types.Type) *types.Sym { 949 return typeLookup(typesymname(t)) 950 } 951 952 // tracksym returns the symbol for tracking use of field/method f, assumed 953 // to be a member of struct/interface type t. 954 func tracksym(t *types.Type, f *types.Field) *types.Sym { 955 return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name) 956 } 957 958 func typesymprefix(prefix string, t *types.Type) *types.Sym { 959 p := prefix + "." + t.ShortString() 960 s := typeLookup(p) 961 962 // This function is for looking up type-related generated functions 963 // (e.g. eq and hash). Make sure they are indeed generated. 964 signatmu.Lock() 965 addsignat(t) 966 signatmu.Unlock() 967 968 //print("algsym: %s -> %+S\n", p, s); 969 970 return s 971 } 972 973 func typenamesym(t *types.Type) *types.Sym { 974 if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { 975 Fatalf("typenamesym %v", t) 976 } 977 s := typesym(t) 978 signatmu.Lock() 979 addsignat(t) 980 signatmu.Unlock() 981 return s 982 } 983 984 func typename(t *types.Type) *Node { 985 s := typenamesym(t) 986 if s.Def == nil { 987 n := newnamel(src.NoXPos, s) 988 n.Type = types.Types[TUINT8] 989 n.SetClass(PEXTERN) 990 n.SetTypecheck(1) 991 s.Def = asTypesNode(n) 992 } 993 994 n := nod(OADDR, asNode(s.Def), nil) 995 n.Type = types.NewPtr(asNode(s.Def).Type) 996 n.SetAddable(true) 997 n.SetTypecheck(1) 998 return n 999 } 1000 1001 func itabname(t, itype *types.Type) *Node { 1002 if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { 1003 Fatalf("itabname(%v, %v)", t, itype) 1004 } 1005 s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString()) 1006 if s.Def == nil { 1007 n := newname(s) 1008 n.Type = types.Types[TUINT8] 1009 n.SetClass(PEXTERN) 1010 n.SetTypecheck(1) 1011 s.Def = asTypesNode(n) 1012 itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) 1013 } 1014 1015 n := nod(OADDR, asNode(s.Def), nil) 1016 n.Type = types.NewPtr(asNode(s.Def).Type) 1017 n.SetAddable(true) 1018 n.SetTypecheck(1) 1019 return n 1020 } 1021 1022 // isreflexive reports whether t has a reflexive equality operator. 1023 // That is, if x==x for all x of type t. 1024 func isreflexive(t *types.Type) bool { 1025 switch t.Etype { 1026 case TBOOL, 1027 TINT, 1028 TUINT, 1029 TINT8, 1030 TUINT8, 1031 TINT16, 1032 TUINT16, 1033 TINT32, 1034 TUINT32, 1035 TINT64, 1036 TUINT64, 1037 TUINTPTR, 1038 TPTR, 1039 TUNSAFEPTR, 1040 TSTRING, 1041 TCHAN: 1042 return true 1043 1044 case TFLOAT32, 1045 TFLOAT64, 1046 TCOMPLEX64, 1047 TCOMPLEX128, 1048 TINTER: 1049 return false 1050 1051 case TARRAY: 1052 return isreflexive(t.Elem()) 1053 1054 case TSTRUCT: 1055 for _, t1 := range t.Fields().Slice() { 1056 if !isreflexive(t1.Type) { 1057 return false 1058 } 1059 } 1060 return true 1061 1062 default: 1063 Fatalf("bad type for map key: %v", t) 1064 return false 1065 } 1066 } 1067 1068 // needkeyupdate reports whether map updates with t as a key 1069 // need the key to be updated. 1070 func needkeyupdate(t *types.Type) bool { 1071 switch t.Etype { 1072 case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, 1073 TINT64, TUINT64, TUINTPTR, TPTR, TUNSAFEPTR, TCHAN: 1074 return false 1075 1076 case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0 1077 TINTER, 1078 TSTRING: // strings might have smaller backing stores 1079 return true 1080 1081 case TARRAY: 1082 return needkeyupdate(t.Elem()) 1083 1084 case TSTRUCT: 1085 for _, t1 := range t.Fields().Slice() { 1086 if needkeyupdate(t1.Type) { 1087 return true 1088 } 1089 } 1090 return false 1091 1092 default: 1093 Fatalf("bad type for map key: %v", t) 1094 return true 1095 } 1096 } 1097 1098 // hashMightPanic reports whether the hash of a map key of type t might panic. 1099 func hashMightPanic(t *types.Type) bool { 1100 switch t.Etype { 1101 case TINTER: 1102 return true 1103 1104 case TARRAY: 1105 return hashMightPanic(t.Elem()) 1106 1107 case TSTRUCT: 1108 for _, t1 := range t.Fields().Slice() { 1109 if hashMightPanic(t1.Type) { 1110 return true 1111 } 1112 } 1113 return false 1114 1115 default: 1116 return false 1117 } 1118 } 1119 1120 // formalType replaces byte and rune aliases with real types. 1121 // They've been separate internally to make error messages 1122 // better, but we have to merge them in the reflect tables. 1123 func formalType(t *types.Type) *types.Type { 1124 if t == types.Bytetype || t == types.Runetype { 1125 return types.Types[t.Etype] 1126 } 1127 return t 1128 } 1129 1130 func dtypesym(t *types.Type) *obj.LSym { 1131 t = formalType(t) 1132 if t.IsUntyped() { 1133 Fatalf("dtypesym %v", t) 1134 } 1135 1136 s := typesym(t) 1137 lsym := s.Linksym() 1138 if s.Siggen() { 1139 return lsym 1140 } 1141 s.SetSiggen(true) 1142 1143 // special case (look for runtime below): 1144 // when compiling package runtime, 1145 // emit the type structures for int, float, etc. 1146 tbase := t 1147 1148 if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil { 1149 tbase = t.Elem() 1150 } 1151 dupok := 0 1152 if tbase.Sym == nil { 1153 dupok = obj.DUPOK 1154 } 1155 1156 if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc 1157 // named types from other files are defined only by those files 1158 if tbase.Sym != nil && tbase.Sym.Pkg != localpkg { 1159 return lsym 1160 } 1161 // TODO(mdempsky): Investigate whether this can happen. 1162 if tbase.Etype == TFORW { 1163 return lsym 1164 } 1165 } 1166 1167 ot := 0 1168 switch t.Etype { 1169 default: 1170 ot = dcommontype(lsym, t) 1171 ot = dextratype(lsym, ot, t, 0) 1172 1173 case TARRAY: 1174 // ../../../../runtime/type.go:/arrayType 1175 s1 := dtypesym(t.Elem()) 1176 t2 := types.NewSlice(t.Elem()) 1177 s2 := dtypesym(t2) 1178 ot = dcommontype(lsym, t) 1179 ot = dsymptr(lsym, ot, s1, 0) 1180 ot = dsymptr(lsym, ot, s2, 0) 1181 ot = duintptr(lsym, ot, uint64(t.NumElem())) 1182 ot = dextratype(lsym, ot, t, 0) 1183 1184 case TSLICE: 1185 // ../../../../runtime/type.go:/sliceType 1186 s1 := dtypesym(t.Elem()) 1187 ot = dcommontype(lsym, t) 1188 ot = dsymptr(lsym, ot, s1, 0) 1189 ot = dextratype(lsym, ot, t, 0) 1190 1191 case TCHAN: 1192 // ../../../../runtime/type.go:/chanType 1193 s1 := dtypesym(t.Elem()) 1194 ot = dcommontype(lsym, t) 1195 ot = dsymptr(lsym, ot, s1, 0) 1196 ot = duintptr(lsym, ot, uint64(t.ChanDir())) 1197 ot = dextratype(lsym, ot, t, 0) 1198 1199 case TFUNC: 1200 for _, t1 := range t.Recvs().Fields().Slice() { 1201 dtypesym(t1.Type) 1202 } 1203 isddd := false 1204 for _, t1 := range t.Params().Fields().Slice() { 1205 isddd = t1.IsDDD() 1206 dtypesym(t1.Type) 1207 } 1208 for _, t1 := range t.Results().Fields().Slice() { 1209 dtypesym(t1.Type) 1210 } 1211 1212 ot = dcommontype(lsym, t) 1213 inCount := t.NumRecvs() + t.NumParams() 1214 outCount := t.NumResults() 1215 if isddd { 1216 outCount |= 1 << 15 1217 } 1218 ot = duint16(lsym, ot, uint16(inCount)) 1219 ot = duint16(lsym, ot, uint16(outCount)) 1220 if Widthptr == 8 { 1221 ot += 4 // align for *rtype 1222 } 1223 1224 dataAdd := (inCount + t.NumResults()) * Widthptr 1225 ot = dextratype(lsym, ot, t, dataAdd) 1226 1227 // Array of rtype pointers follows funcType. 1228 for _, t1 := range t.Recvs().Fields().Slice() { 1229 ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) 1230 } 1231 for _, t1 := range t.Params().Fields().Slice() { 1232 ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) 1233 } 1234 for _, t1 := range t.Results().Fields().Slice() { 1235 ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) 1236 } 1237 1238 case TINTER: 1239 m := imethods(t) 1240 n := len(m) 1241 for _, a := range m { 1242 dtypesym(a.type_) 1243 } 1244 1245 // ../../../../runtime/type.go:/interfaceType 1246 ot = dcommontype(lsym, t) 1247 1248 var tpkg *types.Pkg 1249 if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype { 1250 tpkg = t.Sym.Pkg 1251 } 1252 ot = dgopkgpath(lsym, ot, tpkg) 1253 1254 ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t)) 1255 ot = duintptr(lsym, ot, uint64(n)) 1256 ot = duintptr(lsym, ot, uint64(n)) 1257 dataAdd := imethodSize() * n 1258 ot = dextratype(lsym, ot, t, dataAdd) 1259 1260 for _, a := range m { 1261 // ../../../../runtime/type.go:/imethod 1262 exported := types.IsExported(a.name.Name) 1263 var pkg *types.Pkg 1264 if !exported && a.name.Pkg != tpkg { 1265 pkg = a.name.Pkg 1266 } 1267 nsym := dname(a.name.Name, "", pkg, exported) 1268 1269 ot = dsymptrOff(lsym, ot, nsym) 1270 ot = dsymptrOff(lsym, ot, dtypesym(a.type_)) 1271 } 1272 1273 // ../../../../runtime/type.go:/mapType 1274 case TMAP: 1275 s1 := dtypesym(t.Key()) 1276 s2 := dtypesym(t.Elem()) 1277 s3 := dtypesym(bmap(t)) 1278 ot = dcommontype(lsym, t) 1279 ot = dsymptr(lsym, ot, s1, 0) 1280 ot = dsymptr(lsym, ot, s2, 0) 1281 ot = dsymptr(lsym, ot, s3, 0) 1282 var flags uint32 1283 // Note: flags must match maptype accessors in ../../../../runtime/type.go 1284 // and maptype builder in ../../../../reflect/type.go:MapOf. 1285 if t.Key().Width > MAXKEYSIZE { 1286 ot = duint8(lsym, ot, uint8(Widthptr)) 1287 flags |= 1 // indirect key 1288 } else { 1289 ot = duint8(lsym, ot, uint8(t.Key().Width)) 1290 } 1291 1292 if t.Elem().Width > MAXVALSIZE { 1293 ot = duint8(lsym, ot, uint8(Widthptr)) 1294 flags |= 2 // indirect value 1295 } else { 1296 ot = duint8(lsym, ot, uint8(t.Elem().Width)) 1297 } 1298 ot = duint16(lsym, ot, uint16(bmap(t).Width)) 1299 if isreflexive(t.Key()) { 1300 flags |= 4 // reflexive key 1301 } 1302 if needkeyupdate(t.Key()) { 1303 flags |= 8 // need key update 1304 } 1305 if hashMightPanic(t.Key()) { 1306 flags |= 16 // hash might panic 1307 } 1308 ot = duint32(lsym, ot, flags) 1309 ot = dextratype(lsym, ot, t, 0) 1310 1311 case TPTR: 1312 if t.Elem().Etype == TANY { 1313 // ../../../../runtime/type.go:/UnsafePointerType 1314 ot = dcommontype(lsym, t) 1315 ot = dextratype(lsym, ot, t, 0) 1316 1317 break 1318 } 1319 1320 // ../../../../runtime/type.go:/ptrType 1321 s1 := dtypesym(t.Elem()) 1322 1323 ot = dcommontype(lsym, t) 1324 ot = dsymptr(lsym, ot, s1, 0) 1325 ot = dextratype(lsym, ot, t, 0) 1326 1327 // ../../../../runtime/type.go:/structType 1328 // for security, only the exported fields. 1329 case TSTRUCT: 1330 fields := t.Fields().Slice() 1331 for _, t1 := range fields { 1332 dtypesym(t1.Type) 1333 } 1334 1335 // All non-exported struct field names within a struct 1336 // type must originate from a single package. By 1337 // identifying and recording that package within the 1338 // struct type descriptor, we can omit that 1339 // information from the field descriptors. 1340 var spkg *types.Pkg 1341 for _, f := range fields { 1342 if !types.IsExported(f.Sym.Name) { 1343 spkg = f.Sym.Pkg 1344 break 1345 } 1346 } 1347 1348 ot = dcommontype(lsym, t) 1349 ot = dgopkgpath(lsym, ot, spkg) 1350 ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t)) 1351 ot = duintptr(lsym, ot, uint64(len(fields))) 1352 ot = duintptr(lsym, ot, uint64(len(fields))) 1353 1354 dataAdd := len(fields) * structfieldSize() 1355 ot = dextratype(lsym, ot, t, dataAdd) 1356 1357 for _, f := range fields { 1358 // ../../../../runtime/type.go:/structField 1359 ot = dnameField(lsym, ot, spkg, f) 1360 ot = dsymptr(lsym, ot, dtypesym(f.Type), 0) 1361 offsetAnon := uint64(f.Offset) << 1 1362 if offsetAnon>>1 != uint64(f.Offset) { 1363 Fatalf("%v: bad field offset for %s", t, f.Sym.Name) 1364 } 1365 if f.Embedded != 0 { 1366 offsetAnon |= 1 1367 } 1368 ot = duintptr(lsym, ot, offsetAnon) 1369 } 1370 } 1371 1372 ot = dextratypeData(lsym, ot, t) 1373 ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA)) 1374 1375 // The linker will leave a table of all the typelinks for 1376 // types in the binary, so the runtime can find them. 1377 // 1378 // When buildmode=shared, all types are in typelinks so the 1379 // runtime can deduplicate type pointers. 1380 keep := Ctxt.Flag_dynlink 1381 if !keep && t.Sym == nil { 1382 // For an unnamed type, we only need the link if the type can 1383 // be created at run time by reflect.PtrTo and similar 1384 // functions. If the type exists in the program, those 1385 // functions must return the existing type structure rather 1386 // than creating a new one. 1387 switch t.Etype { 1388 case TPTR, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT: 1389 keep = true 1390 } 1391 } 1392 // Do not put Noalg types in typelinks. See issue #22605. 1393 if typeHasNoAlg(t) { 1394 keep = false 1395 } 1396 lsym.Set(obj.AttrMakeTypelink, keep) 1397 1398 return lsym 1399 } 1400 1401 // for each itabEntry, gather the methods on 1402 // the concrete type that implement the interface 1403 func peekitabs() { 1404 for i := range itabs { 1405 tab := &itabs[i] 1406 methods := genfun(tab.t, tab.itype) 1407 if len(methods) == 0 { 1408 continue 1409 } 1410 tab.entries = methods 1411 } 1412 } 1413 1414 // for the given concrete type and interface 1415 // type, return the (sorted) set of methods 1416 // on the concrete type that implement the interface 1417 func genfun(t, it *types.Type) []*obj.LSym { 1418 if t == nil || it == nil { 1419 return nil 1420 } 1421 sigs := imethods(it) 1422 methods := methods(t) 1423 out := make([]*obj.LSym, 0, len(sigs)) 1424 // TODO(mdempsky): Short circuit before calling methods(t)? 1425 // See discussion on CL 105039. 1426 if len(sigs) == 0 { 1427 return nil 1428 } 1429 1430 // both sigs and methods are sorted by name, 1431 // so we can find the intersect in a single pass 1432 for _, m := range methods { 1433 if m.name == sigs[0].name { 1434 out = append(out, m.isym.Linksym()) 1435 sigs = sigs[1:] 1436 if len(sigs) == 0 { 1437 break 1438 } 1439 } 1440 } 1441 1442 if len(sigs) != 0 { 1443 Fatalf("incomplete itab") 1444 } 1445 1446 return out 1447 } 1448 1449 // itabsym uses the information gathered in 1450 // peekitabs to de-virtualize interface methods. 1451 // Since this is called by the SSA backend, it shouldn't 1452 // generate additional Nodes, Syms, etc. 1453 func itabsym(it *obj.LSym, offset int64) *obj.LSym { 1454 var syms []*obj.LSym 1455 if it == nil { 1456 return nil 1457 } 1458 1459 for i := range itabs { 1460 e := &itabs[i] 1461 if e.lsym == it { 1462 syms = e.entries 1463 break 1464 } 1465 } 1466 if syms == nil { 1467 return nil 1468 } 1469 1470 // keep this arithmetic in sync with *itab layout 1471 methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr)) 1472 if methodnum >= len(syms) { 1473 return nil 1474 } 1475 return syms[methodnum] 1476 } 1477 1478 // addsignat ensures that a runtime type descriptor is emitted for t. 1479 func addsignat(t *types.Type) { 1480 if _, ok := signatset[t]; !ok { 1481 signatset[t] = struct{}{} 1482 signatslice = append(signatslice, t) 1483 } 1484 } 1485 1486 func addsignats(dcls []*Node) { 1487 // copy types from dcl list to signatset 1488 for _, n := range dcls { 1489 if n.Op == OTYPE { 1490 addsignat(n.Type) 1491 } 1492 } 1493 } 1494 1495 func dumpsignats() { 1496 // Process signatset. Use a loop, as dtypesym adds 1497 // entries to signatset while it is being processed. 1498 signats := make([]typeAndStr, len(signatslice)) 1499 for len(signatslice) > 0 { 1500 signats = signats[:0] 1501 // Transfer entries to a slice and sort, for reproducible builds. 1502 for _, t := range signatslice { 1503 signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()}) 1504 delete(signatset, t) 1505 } 1506 signatslice = signatslice[:0] 1507 sort.Sort(typesByString(signats)) 1508 for _, ts := range signats { 1509 t := ts.t 1510 dtypesym(t) 1511 if t.Sym != nil { 1512 dtypesym(types.NewPtr(t)) 1513 } 1514 } 1515 } 1516 } 1517 1518 func dumptabs() { 1519 // process itabs 1520 for _, i := range itabs { 1521 // dump empty itab symbol into i.sym 1522 // type itab struct { 1523 // inter *interfacetype 1524 // _type *_type 1525 // hash uint32 1526 // _ [4]byte 1527 // fun [1]uintptr // variable sized 1528 // } 1529 o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0) 1530 o = dsymptr(i.lsym, o, dtypesym(i.t), 0) 1531 o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash 1532 o += 4 // skip unused field 1533 for _, fn := range genfun(i.t, i.itype) { 1534 o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method 1535 } 1536 // Nothing writes static itabs, so they are read only. 1537 ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA)) 1538 ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym() 1539 dsymptr(ilink, 0, i.lsym, 0) 1540 ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA)) 1541 } 1542 1543 // process ptabs 1544 if localpkg.Name == "main" && len(ptabs) > 0 { 1545 ot := 0 1546 s := Ctxt.Lookup("go.plugin.tabs") 1547 for _, p := range ptabs { 1548 // Dump ptab symbol into go.pluginsym package. 1549 // 1550 // type ptab struct { 1551 // name nameOff 1552 // typ typeOff // pointer to symbol 1553 // } 1554 nsym := dname(p.s.Name, "", nil, true) 1555 ot = dsymptrOff(s, ot, nsym) 1556 ot = dsymptrOff(s, ot, dtypesym(p.t)) 1557 } 1558 ggloblsym(s, int32(ot), int16(obj.RODATA)) 1559 1560 ot = 0 1561 s = Ctxt.Lookup("go.plugin.exports") 1562 for _, p := range ptabs { 1563 ot = dsymptr(s, ot, p.s.Linksym(), 0) 1564 } 1565 ggloblsym(s, int32(ot), int16(obj.RODATA)) 1566 } 1567 } 1568 1569 func dumpimportstrings() { 1570 // generate import strings for imported packages 1571 for _, p := range types.ImportedPkgList() { 1572 dimportpath(p) 1573 } 1574 } 1575 1576 func dumpbasictypes() { 1577 // do basic types if compiling package runtime. 1578 // they have to be in at least one package, 1579 // and runtime is always loaded implicitly, 1580 // so this is as good as any. 1581 // another possible choice would be package main, 1582 // but using runtime means fewer copies in object files. 1583 if myimportpath == "runtime" { 1584 for i := types.EType(1); i <= TBOOL; i++ { 1585 dtypesym(types.NewPtr(types.Types[i])) 1586 } 1587 dtypesym(types.NewPtr(types.Types[TSTRING])) 1588 dtypesym(types.NewPtr(types.Types[TUNSAFEPTR])) 1589 1590 // emit type structs for error and func(error) string. 1591 // The latter is the type of an auto-generated wrapper. 1592 dtypesym(types.NewPtr(types.Errortype)) 1593 1594 dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])})) 1595 1596 // add paths for runtime and main, which 6l imports implicitly. 1597 dimportpath(Runtimepkg) 1598 1599 if flag_race { 1600 dimportpath(racepkg) 1601 } 1602 if flag_msan { 1603 dimportpath(msanpkg) 1604 } 1605 dimportpath(types.NewPkg("main", "")) 1606 } 1607 } 1608 1609 type typeAndStr struct { 1610 t *types.Type 1611 short string 1612 regular string 1613 } 1614 1615 type typesByString []typeAndStr 1616 1617 func (a typesByString) Len() int { return len(a) } 1618 func (a typesByString) Less(i, j int) bool { 1619 if a[i].short != a[j].short { 1620 return a[i].short < a[j].short 1621 } 1622 // When the only difference between the types is whether 1623 // they refer to byte or uint8, such as **byte vs **uint8, 1624 // the types' ShortStrings can be identical. 1625 // To preserve deterministic sort ordering, sort these by String(). 1626 return a[i].regular < a[j].regular 1627 } 1628 func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 1629 1630 func dalgsym(t *types.Type) *obj.LSym { 1631 var lsym *obj.LSym 1632 var hashfunc *obj.LSym 1633 var eqfunc *obj.LSym 1634 1635 // dalgsym is only called for a type that needs an algorithm table, 1636 // which implies that the type is comparable (or else it would use ANOEQ). 1637 1638 if algtype(t) == AMEM { 1639 // we use one algorithm table for all AMEM types of a given size 1640 p := fmt.Sprintf(".alg%d", t.Width) 1641 1642 s := typeLookup(p) 1643 lsym = s.Linksym() 1644 if s.AlgGen() { 1645 return lsym 1646 } 1647 s.SetAlgGen(true) 1648 1649 if memhashvarlen == nil { 1650 memhashvarlen = sysfunc("memhash_varlen") 1651 memequalvarlen = sysvar("memequal_varlen") // asm func 1652 } 1653 1654 // make hash closure 1655 p = fmt.Sprintf(".hashfunc%d", t.Width) 1656 1657 hashfunc = typeLookup(p).Linksym() 1658 1659 ot := 0 1660 ot = dsymptr(hashfunc, ot, memhashvarlen, 0) 1661 ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure 1662 ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA) 1663 1664 // make equality closure 1665 p = fmt.Sprintf(".eqfunc%d", t.Width) 1666 1667 eqfunc = typeLookup(p).Linksym() 1668 1669 ot = 0 1670 ot = dsymptr(eqfunc, ot, memequalvarlen, 0) 1671 ot = duintptr(eqfunc, ot, uint64(t.Width)) 1672 ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA) 1673 } else { 1674 // generate an alg table specific to this type 1675 s := typesymprefix(".alg", t) 1676 lsym = s.Linksym() 1677 1678 hash := typesymprefix(".hash", t) 1679 eq := typesymprefix(".eq", t) 1680 hashfunc = typesymprefix(".hashfunc", t).Linksym() 1681 eqfunc = typesymprefix(".eqfunc", t).Linksym() 1682 1683 genhash(hash, t) 1684 geneq(eq, t) 1685 1686 // make Go funcs (closures) for calling hash and equal from Go 1687 dsymptr(hashfunc, 0, hash.Linksym(), 0) 1688 ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA) 1689 dsymptr(eqfunc, 0, eq.Linksym(), 0) 1690 ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA) 1691 } 1692 1693 // ../../../../runtime/alg.go:/typeAlg 1694 ot := 0 1695 1696 ot = dsymptr(lsym, ot, hashfunc, 0) 1697 ot = dsymptr(lsym, ot, eqfunc, 0) 1698 ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA) 1699 return lsym 1700 } 1701 1702 // maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap, 1703 // which holds 1-bit entries describing where pointers are in a given type. 1704 // Above this length, the GC information is recorded as a GC program, 1705 // which can express repetition compactly. In either form, the 1706 // information is used by the runtime to initialize the heap bitmap, 1707 // and for large types (like 128 or more words), they are roughly the 1708 // same speed. GC programs are never much larger and often more 1709 // compact. (If large arrays are involved, they can be arbitrarily 1710 // more compact.) 1711 // 1712 // The cutoff must be large enough that any allocation large enough to 1713 // use a GC program is large enough that it does not share heap bitmap 1714 // bytes with any other objects, allowing the GC program execution to 1715 // assume an aligned start and not use atomic operations. In the current 1716 // runtime, this means all malloc size classes larger than the cutoff must 1717 // be multiples of four words. On 32-bit systems that's 16 bytes, and 1718 // all size classes >= 16 bytes are 16-byte aligned, so no real constraint. 1719 // On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed 1720 // for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated 1721 // is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes 1722 // must be >= 4. 1723 // 1724 // We used to use 16 because the GC programs do have some constant overhead 1725 // to get started, and processing 128 pointers seems to be enough to 1726 // amortize that overhead well. 1727 // 1728 // To make sure that the runtime's chansend can call typeBitsBulkBarrier, 1729 // we raised the limit to 2048, so that even 32-bit systems are guaranteed to 1730 // use bitmaps for objects up to 64 kB in size. 1731 // 1732 // Also known to reflect/type.go. 1733 // 1734 const maxPtrmaskBytes = 2048 1735 1736 // dgcsym emits and returns a data symbol containing GC information for type t, 1737 // along with a boolean reporting whether the UseGCProg bit should be set in 1738 // the type kind, and the ptrdata field to record in the reflect type information. 1739 func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { 1740 ptrdata = typeptrdata(t) 1741 if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 { 1742 lsym = dgcptrmask(t) 1743 return 1744 } 1745 1746 useGCProg = true 1747 lsym, ptrdata = dgcprog(t) 1748 return 1749 } 1750 1751 // dgcptrmask emits and returns the symbol containing a pointer mask for type t. 1752 func dgcptrmask(t *types.Type) *obj.LSym { 1753 ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8) 1754 fillptrmask(t, ptrmask) 1755 p := fmt.Sprintf("gcbits.%x", ptrmask) 1756 1757 sym := Runtimepkg.Lookup(p) 1758 lsym := sym.Linksym() 1759 if !sym.Uniq() { 1760 sym.SetUniq(true) 1761 for i, x := range ptrmask { 1762 duint8(lsym, i, x) 1763 } 1764 ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL) 1765 } 1766 return lsym 1767 } 1768 1769 // fillptrmask fills in ptrmask with 1s corresponding to the 1770 // word offsets in t that hold pointers. 1771 // ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits. 1772 func fillptrmask(t *types.Type, ptrmask []byte) { 1773 for i := range ptrmask { 1774 ptrmask[i] = 0 1775 } 1776 if !types.Haspointers(t) { 1777 return 1778 } 1779 1780 vec := bvalloc(8 * int32(len(ptrmask))) 1781 onebitwalktype1(t, 0, vec) 1782 1783 nptr := typeptrdata(t) / int64(Widthptr) 1784 for i := int64(0); i < nptr; i++ { 1785 if vec.Get(int32(i)) { 1786 ptrmask[i/8] |= 1 << (uint(i) % 8) 1787 } 1788 } 1789 } 1790 1791 // dgcprog emits and returns the symbol containing a GC program for type t 1792 // along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]). 1793 // In practice, the size is typeptrdata(t) except for non-trivial arrays. 1794 // For non-trivial arrays, the program describes the full t.Width size. 1795 func dgcprog(t *types.Type) (*obj.LSym, int64) { 1796 dowidth(t) 1797 if t.Width == BADWIDTH { 1798 Fatalf("dgcprog: %v badwidth", t) 1799 } 1800 lsym := typesymprefix(".gcprog", t).Linksym() 1801 var p GCProg 1802 p.init(lsym) 1803 p.emit(t, 0) 1804 offset := p.w.BitIndex() * int64(Widthptr) 1805 p.end() 1806 if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width { 1807 Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width) 1808 } 1809 return lsym, offset 1810 } 1811 1812 type GCProg struct { 1813 lsym *obj.LSym 1814 symoff int 1815 w gcprog.Writer 1816 } 1817 1818 var Debug_gcprog int // set by -d gcprog 1819 1820 func (p *GCProg) init(lsym *obj.LSym) { 1821 p.lsym = lsym 1822 p.symoff = 4 // first 4 bytes hold program length 1823 p.w.Init(p.writeByte) 1824 if Debug_gcprog > 0 { 1825 fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym) 1826 p.w.Debug(os.Stderr) 1827 } 1828 } 1829 1830 func (p *GCProg) writeByte(x byte) { 1831 p.symoff = duint8(p.lsym, p.symoff, x) 1832 } 1833 1834 func (p *GCProg) end() { 1835 p.w.End() 1836 duint32(p.lsym, 0, uint32(p.symoff-4)) 1837 ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) 1838 if Debug_gcprog > 0 { 1839 fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym) 1840 } 1841 } 1842 1843 func (p *GCProg) emit(t *types.Type, offset int64) { 1844 dowidth(t) 1845 if !types.Haspointers(t) { 1846 return 1847 } 1848 if t.Width == int64(Widthptr) { 1849 p.w.Ptr(offset / int64(Widthptr)) 1850 return 1851 } 1852 switch t.Etype { 1853 default: 1854 Fatalf("GCProg.emit: unexpected type %v", t) 1855 1856 case TSTRING: 1857 p.w.Ptr(offset / int64(Widthptr)) 1858 1859 case TINTER: 1860 // Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1. 1861 p.w.Ptr(offset/int64(Widthptr) + 1) 1862 1863 case TSLICE: 1864 p.w.Ptr(offset / int64(Widthptr)) 1865 1866 case TARRAY: 1867 if t.NumElem() == 0 { 1868 // should have been handled by haspointers check above 1869 Fatalf("GCProg.emit: empty array") 1870 } 1871 1872 // Flatten array-of-array-of-array to just a big array by multiplying counts. 1873 count := t.NumElem() 1874 elem := t.Elem() 1875 for elem.IsArray() { 1876 count *= elem.NumElem() 1877 elem = elem.Elem() 1878 } 1879 1880 if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) { 1881 // Cheaper to just emit the bits. 1882 for i := int64(0); i < count; i++ { 1883 p.emit(elem, offset+i*elem.Width) 1884 } 1885 return 1886 } 1887 p.emit(elem, offset) 1888 p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr)) 1889 p.w.Repeat(elem.Width/int64(Widthptr), count-1) 1890 1891 case TSTRUCT: 1892 for _, t1 := range t.Fields().Slice() { 1893 p.emit(t1.Type, offset+t1.Offset) 1894 } 1895 } 1896 } 1897 1898 // zeroaddr returns the address of a symbol with at least 1899 // size bytes of zeros. 1900 func zeroaddr(size int64) *Node { 1901 if size >= 1<<31 { 1902 Fatalf("map value too big %d", size) 1903 } 1904 if zerosize < size { 1905 zerosize = size 1906 } 1907 s := mappkg.Lookup("zero") 1908 if s.Def == nil { 1909 x := newname(s) 1910 x.Type = types.Types[TUINT8] 1911 x.SetClass(PEXTERN) 1912 x.SetTypecheck(1) 1913 s.Def = asTypesNode(x) 1914 } 1915 z := nod(OADDR, asNode(s.Def), nil) 1916 z.Type = types.NewPtr(types.Types[TUINT8]) 1917 z.SetAddable(true) 1918 z.SetTypecheck(1) 1919 return z 1920 }