github.com/zebozhuang/go@v0.0.0-20200207033046-f8a98f6f5c5d/src/cmd/compile/internal/gc/reflect.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package gc 6 7 import ( 8 "cmd/compile/internal/types" 9 "cmd/internal/gcprog" 10 "cmd/internal/obj" 11 "cmd/internal/objabi" 12 "cmd/internal/src" 13 "fmt" 14 "os" 15 "sort" 16 "strings" 17 "sync" 18 ) 19 20 type itabEntry struct { 21 t, itype *types.Type 22 lsym *obj.LSym // symbol of the itab itself 23 24 // symbols of each method in 25 // the itab, sorted by byte offset; 26 // filled in by peekitabs 27 entries []*obj.LSym 28 } 29 30 type ptabEntry struct { 31 s *types.Sym 32 t *types.Type 33 } 34 35 // runtime interface and reflection data structures 36 var ( 37 signatsetmu sync.Mutex // protects signatset 38 signatset = make(map[*types.Type]struct{}) 39 40 itabs []itabEntry 41 ptabs []ptabEntry 42 ) 43 44 type Sig struct { 45 name string 46 pkg *types.Pkg 47 isym *types.Sym 48 tsym *types.Sym 49 type_ *types.Type 50 mtype *types.Type 51 offset int32 52 } 53 54 // siglt sorts method signatures by name, then package path. 55 func siglt(a, b *Sig) bool { 56 if a.name != b.name { 57 return a.name < b.name 58 } 59 if a.pkg == b.pkg { 60 return false 61 } 62 if a.pkg == nil { 63 return true 64 } 65 if b.pkg == nil { 66 return false 67 } 68 return a.pkg.Path < b.pkg.Path 69 } 70 71 // Builds a type representing a Bucket structure for 72 // the given map type. This type is not visible to users - 73 // we include only enough information to generate a correct GC 74 // program for it. 75 // Make sure this stays in sync with ../../../../runtime/hashmap.go! 
76 const ( 77 BUCKETSIZE = 8 78 MAXKEYSIZE = 128 79 MAXVALSIZE = 128 80 ) 81 82 func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) 83 func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) 84 func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) 85 if t.Sym == nil && len(methods(t)) == 0 { 86 return 0 87 } 88 return 4 + 2 + 2 + 4 + 4 89 } 90 91 func makefield(name string, t *types.Type) *types.Field { 92 f := types.NewField() 93 f.Type = t 94 f.Sym = (*types.Pkg)(nil).Lookup(name) 95 return f 96 } 97 98 func mapbucket(t *types.Type) *types.Type { 99 if t.MapType().Bucket != nil { 100 return t.MapType().Bucket 101 } 102 103 bucket := types.New(TSTRUCT) 104 keytype := t.Key() 105 valtype := t.Val() 106 dowidth(keytype) 107 dowidth(valtype) 108 if keytype.Width > MAXKEYSIZE { 109 keytype = types.NewPtr(keytype) 110 } 111 if valtype.Width > MAXVALSIZE { 112 valtype = types.NewPtr(valtype) 113 } 114 115 field := make([]*types.Field, 0, 5) 116 117 // The first field is: uint8 topbits[BUCKETSIZE]. 118 arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE) 119 field = append(field, makefield("topbits", arr)) 120 121 arr = types.NewArray(keytype, BUCKETSIZE) 122 arr.SetNoalg(true) 123 field = append(field, makefield("keys", arr)) 124 125 arr = types.NewArray(valtype, BUCKETSIZE) 126 arr.SetNoalg(true) 127 field = append(field, makefield("values", arr)) 128 129 // Make sure the overflow pointer is the last memory in the struct, 130 // because the runtime assumes it can use size-ptrSize as the 131 // offset of the overflow pointer. We double-check that property 132 // below once the offsets and size are computed. 133 // 134 // BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point. 135 // On 32-bit systems, the max alignment is 32-bit, and the 136 // overflow pointer will add another 32-bit field, and the struct 137 // will end with no padding. 
138 // On 64-bit systems, the max alignment is 64-bit, and the 139 // overflow pointer will add another 64-bit field, and the struct 140 // will end with no padding. 141 // On nacl/amd64p32, however, the max alignment is 64-bit, 142 // but the overflow pointer will add only a 32-bit field, 143 // so if the struct needs 64-bit padding (because a key or value does) 144 // then it would end with an extra 32-bit padding field. 145 // Preempt that by emitting the padding here. 146 if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr { 147 field = append(field, makefield("pad", types.Types[TUINTPTR])) 148 } 149 150 // If keys and values have no pointers, the map implementation 151 // can keep a list of overflow pointers on the side so that 152 // buckets can be marked as having no pointers. 153 // Arrange for the bucket to have no pointers by changing 154 // the type of the overflow field to uintptr in this case. 155 // See comment on hmap.overflow in ../../../../runtime/hashmap.go. 156 otyp := types.NewPtr(bucket) 157 if !types.Haspointers(t.Val()) && !types.Haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE { 158 otyp = types.Types[TUINTPTR] 159 } 160 ovf := makefield("overflow", otyp) 161 field = append(field, ovf) 162 163 // link up fields 164 bucket.SetNoalg(true) 165 bucket.SetLocal(t.Local()) 166 bucket.SetFields(field[:]) 167 dowidth(bucket) 168 169 // Double-check that overflow field is final memory in struct, 170 // with no padding at end. See comment above. 171 if ovf.Offset != bucket.Width-int64(Widthptr) { 172 Fatalf("bad math in mapbucket for %v", t) 173 } 174 175 t.MapType().Bucket = bucket 176 177 bucket.StructType().Map = t 178 return bucket 179 } 180 181 // Builds a type representing a Hmap structure for the given map type. 182 // Make sure this stays in sync with ../../../../runtime/hashmap.go! 
183 func hmap(t *types.Type) *types.Type { 184 if t.MapType().Hmap != nil { 185 return t.MapType().Hmap 186 } 187 188 bucket := mapbucket(t) 189 fields := []*types.Field{ 190 makefield("count", types.Types[TINT]), 191 makefield("flags", types.Types[TUINT8]), 192 makefield("B", types.Types[TUINT8]), 193 makefield("noverflow", types.Types[TUINT16]), 194 makefield("hash0", types.Types[TUINT32]), 195 makefield("buckets", types.NewPtr(bucket)), 196 makefield("oldbuckets", types.NewPtr(bucket)), 197 makefield("nevacuate", types.Types[TUINTPTR]), 198 makefield("overflow", types.Types[TUNSAFEPTR]), 199 } 200 201 h := types.New(TSTRUCT) 202 h.SetNoalg(true) 203 h.SetLocal(t.Local()) 204 h.SetFields(fields) 205 dowidth(h) 206 t.MapType().Hmap = h 207 h.StructType().Map = t 208 return h 209 } 210 211 func hiter(t *types.Type) *types.Type { 212 if t.MapType().Hiter != nil { 213 return t.MapType().Hiter 214 } 215 216 // build a struct: 217 // hiter { 218 // key *Key 219 // val *Value 220 // t *MapType 221 // h *Hmap 222 // buckets *Bucket 223 // bptr *Bucket 224 // overflow0 unsafe.Pointer 225 // overflow1 unsafe.Pointer 226 // startBucket uintptr 227 // stuff uintptr 228 // bucket uintptr 229 // checkBucket uintptr 230 // } 231 // must match ../../../../runtime/hashmap.go:hiter. 
232 var field [12]*types.Field 233 field[0] = makefield("key", types.NewPtr(t.Key())) 234 field[1] = makefield("val", types.NewPtr(t.Val())) 235 field[2] = makefield("t", types.NewPtr(types.Types[TUINT8])) 236 field[3] = makefield("h", types.NewPtr(hmap(t))) 237 field[4] = makefield("buckets", types.NewPtr(mapbucket(t))) 238 field[5] = makefield("bptr", types.NewPtr(mapbucket(t))) 239 field[6] = makefield("overflow0", types.Types[TUNSAFEPTR]) 240 field[7] = makefield("overflow1", types.Types[TUNSAFEPTR]) 241 field[8] = makefield("startBucket", types.Types[TUINTPTR]) 242 field[9] = makefield("stuff", types.Types[TUINTPTR]) // offset+wrapped+B+I 243 field[10] = makefield("bucket", types.Types[TUINTPTR]) 244 field[11] = makefield("checkBucket", types.Types[TUINTPTR]) 245 246 // build iterator struct holding the above fields 247 i := types.New(TSTRUCT) 248 i.SetNoalg(true) 249 i.SetFields(field[:]) 250 dowidth(i) 251 if i.Width != int64(12*Widthptr) { 252 Fatalf("hash_iter size not correct %d %d", i.Width, 12*Widthptr) 253 } 254 t.MapType().Hiter = i 255 i.StructType().Map = t 256 return i 257 } 258 259 // f is method type, with receiver. 260 // return function type, receiver as first argument (or not). 261 func methodfunc(f *types.Type, receiver *types.Type) *types.Type { 262 var in []*Node 263 if receiver != nil { 264 d := nod(ODCLFIELD, nil, nil) 265 d.Type = receiver 266 in = append(in, d) 267 } 268 269 var d *Node 270 for _, t := range f.Params().Fields().Slice() { 271 d = nod(ODCLFIELD, nil, nil) 272 d.Type = t.Type 273 d.SetIsddd(t.Isddd()) 274 in = append(in, d) 275 } 276 277 var out []*Node 278 for _, t := range f.Results().Fields().Slice() { 279 d = nod(ODCLFIELD, nil, nil) 280 d.Type = t.Type 281 out = append(out, d) 282 } 283 284 t := functype(nil, in, out) 285 if f.Nname() != nil { 286 // Link to name of original method function. 
287 t.SetNname(f.Nname()) 288 } 289 290 return t 291 } 292 293 // methods returns the methods of the non-interface type t, sorted by name. 294 // Generates stub functions as needed. 295 func methods(t *types.Type) []*Sig { 296 // method type 297 mt := methtype(t) 298 299 if mt == nil { 300 return nil 301 } 302 expandmeth(mt) 303 304 // type stored in interface word 305 it := t 306 307 if !isdirectiface(it) { 308 it = types.NewPtr(t) 309 } 310 311 // make list of methods for t, 312 // generating code if necessary. 313 var ms []*Sig 314 for _, f := range mt.AllMethods().Slice() { 315 if f.Type.Etype != TFUNC || f.Type.Recv() == nil { 316 Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) 317 } 318 if f.Type.Recv() == nil { 319 Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) 320 } 321 if f.Nointerface() { 322 continue 323 } 324 325 method := f.Sym 326 if method == nil { 327 continue 328 } 329 330 // get receiver type for this particular method. 331 // if pointer receiver but non-pointer t and 332 // this is not an embedded pointer inside a struct, 333 // method does not apply. 
334 this := f.Type.Recv().Type 335 336 if this.IsPtr() && this.Elem() == t { 337 continue 338 } 339 if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) { 340 continue 341 } 342 343 var sig Sig 344 ms = append(ms, &sig) 345 346 sig.name = method.Name 347 if !exportname(method.Name) { 348 if method.Pkg == nil { 349 Fatalf("methods: missing package") 350 } 351 sig.pkg = method.Pkg 352 } 353 354 sig.isym = methodsym(method, it, true) 355 sig.tsym = methodsym(method, t, false) 356 sig.type_ = methodfunc(f.Type, t) 357 sig.mtype = methodfunc(f.Type, nil) 358 359 if !sig.isym.Siggen() { 360 sig.isym.SetSiggen(true) 361 if !eqtype(this, it) || this.Width < int64(Widthptr) { 362 compiling_wrappers = 1 363 genwrapper(it, f, sig.isym, 1) 364 compiling_wrappers = 0 365 } 366 } 367 368 if !sig.tsym.Siggen() { 369 sig.tsym.SetSiggen(true) 370 if !eqtype(this, t) { 371 compiling_wrappers = 1 372 genwrapper(t, f, sig.tsym, 0) 373 compiling_wrappers = 0 374 } 375 } 376 } 377 378 obj.SortSlice(ms, func(i, j int) bool { return siglt(ms[i], ms[j]) }) 379 return ms 380 } 381 382 // imethods returns the methods of the interface type t, sorted by name. 383 func imethods(t *types.Type) []*Sig { 384 var methods []*Sig 385 for _, f := range t.Fields().Slice() { 386 if f.Type.Etype != TFUNC || f.Sym == nil { 387 continue 388 } 389 method := f.Sym 390 var sig = Sig{ 391 name: method.Name, 392 } 393 if !exportname(method.Name) { 394 if method.Pkg == nil { 395 Fatalf("imethods: missing package") 396 } 397 sig.pkg = method.Pkg 398 } 399 400 sig.mtype = f.Type 401 sig.offset = 0 402 sig.type_ = methodfunc(f.Type, nil) 403 404 if n := len(methods); n > 0 { 405 last := methods[n-1] 406 if !(siglt(last, &sig)) { 407 Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name) 408 } 409 } 410 methods = append(methods, &sig) 411 412 // Compiler can only refer to wrappers for non-blank methods. 
413 if method.IsBlank() { 414 continue 415 } 416 417 // NOTE(rsc): Perhaps an oversight that 418 // IfaceType.Method is not in the reflect data. 419 // Generate the method body, so that compiled 420 // code can refer to it. 421 isym := methodsym(method, t, false) 422 if !isym.Siggen() { 423 isym.SetSiggen(true) 424 genwrapper(t, f, isym, 0) 425 } 426 } 427 428 return methods 429 } 430 431 func dimportpath(p *types.Pkg) { 432 if p.Pathsym != nil { 433 return 434 } 435 436 // If we are compiling the runtime package, there are two runtime packages around 437 // -- localpkg and Runtimepkg. We don't want to produce import path symbols for 438 // both of them, so just produce one for localpkg. 439 if myimportpath == "runtime" && p == Runtimepkg { 440 return 441 } 442 443 var str string 444 if p == localpkg { 445 // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. 446 str = myimportpath 447 } else { 448 str = p.Path 449 } 450 451 s := Ctxt.Lookup("type..importpath." + p.Prefix + ".") 452 ot := dnameData(s, 0, str, "", nil, false) 453 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 454 p.Pathsym = s 455 } 456 457 func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { 458 if pkg == nil { 459 return duintptr(s, ot, 0) 460 } 461 462 if pkg == localpkg && myimportpath == "" { 463 // If we don't know the full import path of the package being compiled 464 // (i.e. -p was not passed on the compiler command line), emit a reference to 465 // type..importpath.""., which the linker will rewrite using the correct import path. 466 // Every package that imports this one directly defines the symbol. 467 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 468 ns := Ctxt.Lookup(`type..importpath."".`) 469 return dsymptr(s, ot, ns, 0) 470 } 471 472 dimportpath(pkg) 473 return dsymptr(s, ot, pkg.Pathsym, 0) 474 } 475 476 // dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol. 
477 func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { 478 if pkg == nil { 479 return duint32(s, ot, 0) 480 } 481 if pkg == localpkg && myimportpath == "" { 482 // If we don't know the full import path of the package being compiled 483 // (i.e. -p was not passed on the compiler command line), emit a reference to 484 // type..importpath.""., which the linker will rewrite using the correct import path. 485 // Every package that imports this one directly defines the symbol. 486 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 487 ns := Ctxt.Lookup(`type..importpath."".`) 488 return dsymptrOff(s, ot, ns, 0) 489 } 490 491 dimportpath(pkg) 492 return dsymptrOff(s, ot, pkg.Pathsym, 0) 493 } 494 495 // isExportedField reports whether a struct field is exported. 496 // It also returns the package to use for PkgPath for an unexported field. 497 func isExportedField(ft *types.Field) (bool, *types.Pkg) { 498 if ft.Sym != nil && ft.Embedded == 0 { 499 return exportname(ft.Sym.Name), ft.Sym.Pkg 500 } else { 501 if ft.Type.Sym != nil && 502 (ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) { 503 return false, ft.Type.Sym.Pkg 504 } else { 505 return true, nil 506 } 507 } 508 } 509 510 // dnameField dumps a reflect.name for a struct field. 511 func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int { 512 var name string 513 if ft.Sym != nil { 514 name = ft.Sym.Name 515 } 516 isExported, fpkg := isExportedField(ft) 517 if isExported || fpkg == spkg { 518 fpkg = nil 519 } 520 nsym := dname(name, ft.Note, fpkg, isExported) 521 return dsymptr(lsym, ot, nsym, 0) 522 } 523 524 // dnameData writes the contents of a reflect.name into s at offset ot. 525 func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int { 526 if len(name) > 1<<16-1 { 527 Fatalf("name too long: %s", name) 528 } 529 if len(tag) > 1<<16-1 { 530 Fatalf("tag too long: %s", tag) 531 } 532 533 // Encode name and tag. 
See reflect/type.go for details. 534 var bits byte 535 l := 1 + 2 + len(name) 536 if exported { 537 bits |= 1 << 0 538 } 539 if len(tag) > 0 { 540 l += 2 + len(tag) 541 bits |= 1 << 1 542 } 543 if pkg != nil { 544 bits |= 1 << 2 545 } 546 b := make([]byte, l) 547 b[0] = bits 548 b[1] = uint8(len(name) >> 8) 549 b[2] = uint8(len(name)) 550 copy(b[3:], name) 551 if len(tag) > 0 { 552 tb := b[3+len(name):] 553 tb[0] = uint8(len(tag) >> 8) 554 tb[1] = uint8(len(tag)) 555 copy(tb[2:], tag) 556 } 557 558 ot = int(s.WriteBytes(Ctxt, int64(ot), b)) 559 560 if pkg != nil { 561 ot = dgopkgpathOff(s, ot, pkg) 562 } 563 564 return ot 565 } 566 567 var dnameCount int 568 569 // dname creates a reflect.name for a struct field or method. 570 func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym { 571 // Write out data as "type.." to signal two things to the 572 // linker, first that when dynamically linking, the symbol 573 // should be moved to a relro section, and second that the 574 // contents should not be decoded as a type. 575 sname := "type..namedata." 576 if pkg == nil { 577 // In the common case, share data with other packages. 578 if name == "" { 579 if exported { 580 sname += "-noname-exported." + tag 581 } else { 582 sname += "-noname-unexported." + tag 583 } 584 } else { 585 if exported { 586 sname += name + "." + tag 587 } else { 588 sname += name + "-" + tag 589 } 590 } 591 } else { 592 sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount) 593 dnameCount++ 594 } 595 s := Ctxt.Lookup(sname) 596 if len(s.P) > 0 { 597 return s 598 } 599 ot := dnameData(s, 0, name, tag, pkg, exported) 600 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 601 return s 602 } 603 604 // dextratype dumps the fields of a runtime.uncommontype. 605 // dataAdd is the offset in bytes after the header where the 606 // backing array of the []method field is written (by dextratypeData). 
607 func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int { 608 m := methods(t) 609 if t.Sym == nil && len(m) == 0 { 610 return ot 611 } 612 noff := int(Rnd(int64(ot), int64(Widthptr))) 613 if noff != ot { 614 Fatalf("unexpected alignment in dextratype for %v", t) 615 } 616 617 for _, a := range m { 618 dtypesym(a.type_) 619 } 620 621 ot = dgopkgpathOff(lsym, ot, typePkg(t)) 622 623 dataAdd += uncommonSize(t) 624 mcount := len(m) 625 if mcount != int(uint16(mcount)) { 626 Fatalf("too many methods on %v: %d", t, mcount) 627 } 628 if dataAdd != int(uint32(dataAdd)) { 629 Fatalf("methods are too far away on %v: %d", t, dataAdd) 630 } 631 632 ot = duint16(lsym, ot, uint16(mcount)) 633 ot = duint16(lsym, ot, 0) 634 ot = duint32(lsym, ot, uint32(dataAdd)) 635 ot = duint32(lsym, ot, 0) 636 return ot 637 } 638 639 func typePkg(t *types.Type) *types.Pkg { 640 tsym := t.Sym 641 if tsym == nil { 642 switch t.Etype { 643 case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN: 644 if t.Elem() != nil { 645 tsym = t.Elem().Sym 646 } 647 } 648 } 649 if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype { 650 return tsym.Pkg 651 } 652 return nil 653 } 654 655 // dextratypeData dumps the backing array for the []method field of 656 // runtime.uncommontype. 
657 func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int { 658 for _, a := range methods(t) { 659 // ../../../../runtime/type.go:/method 660 exported := exportname(a.name) 661 var pkg *types.Pkg 662 if !exported && a.pkg != typePkg(t) { 663 pkg = a.pkg 664 } 665 nsym := dname(a.name, "", pkg, exported) 666 667 ot = dsymptrOff(lsym, ot, nsym, 0) 668 ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype).Linksym()) 669 ot = dmethodptrOff(lsym, ot, a.isym.Linksym()) 670 ot = dmethodptrOff(lsym, ot, a.tsym.Linksym()) 671 } 672 return ot 673 } 674 675 func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int { 676 duint32(s, ot, 0) 677 r := obj.Addrel(s) 678 r.Off = int32(ot) 679 r.Siz = 4 680 r.Sym = x 681 r.Type = objabi.R_METHODOFF 682 return ot + 4 683 } 684 685 var kinds = []int{ 686 TINT: objabi.KindInt, 687 TUINT: objabi.KindUint, 688 TINT8: objabi.KindInt8, 689 TUINT8: objabi.KindUint8, 690 TINT16: objabi.KindInt16, 691 TUINT16: objabi.KindUint16, 692 TINT32: objabi.KindInt32, 693 TUINT32: objabi.KindUint32, 694 TINT64: objabi.KindInt64, 695 TUINT64: objabi.KindUint64, 696 TUINTPTR: objabi.KindUintptr, 697 TFLOAT32: objabi.KindFloat32, 698 TFLOAT64: objabi.KindFloat64, 699 TBOOL: objabi.KindBool, 700 TSTRING: objabi.KindString, 701 TPTR32: objabi.KindPtr, 702 TPTR64: objabi.KindPtr, 703 TSTRUCT: objabi.KindStruct, 704 TINTER: objabi.KindInterface, 705 TCHAN: objabi.KindChan, 706 TMAP: objabi.KindMap, 707 TARRAY: objabi.KindArray, 708 TSLICE: objabi.KindSlice, 709 TFUNC: objabi.KindFunc, 710 TCOMPLEX64: objabi.KindComplex64, 711 TCOMPLEX128: objabi.KindComplex128, 712 TUNSAFEPTR: objabi.KindUnsafePointer, 713 } 714 715 // typeptrdata returns the length in bytes of the prefix of t 716 // containing pointer data. Anything after this offset is scalar data. 
717 func typeptrdata(t *types.Type) int64 { 718 if !types.Haspointers(t) { 719 return 0 720 } 721 722 switch t.Etype { 723 case TPTR32, 724 TPTR64, 725 TUNSAFEPTR, 726 TFUNC, 727 TCHAN, 728 TMAP: 729 return int64(Widthptr) 730 731 case TSTRING: 732 // struct { byte *str; intgo len; } 733 return int64(Widthptr) 734 735 case TINTER: 736 // struct { Itab *tab; void *data; } or 737 // struct { Type *type; void *data; } 738 return 2 * int64(Widthptr) 739 740 case TSLICE: 741 // struct { byte *array; uintgo len; uintgo cap; } 742 return int64(Widthptr) 743 744 case TARRAY: 745 // haspointers already eliminated t.NumElem() == 0. 746 return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem()) 747 748 case TSTRUCT: 749 // Find the last field that has pointers. 750 var lastPtrField *types.Field 751 for _, t1 := range t.Fields().Slice() { 752 if types.Haspointers(t1.Type) { 753 lastPtrField = t1 754 } 755 } 756 return lastPtrField.Offset + typeptrdata(lastPtrField.Type) 757 758 default: 759 Fatalf("typeptrdata: unexpected type, %v", t) 760 return 0 761 } 762 } 763 764 // tflag is documented in reflect/type.go. 765 // 766 // tflag values must be kept in sync with copies in: 767 // cmd/compile/internal/gc/reflect.go 768 // cmd/link/internal/ld/decodesym.go 769 // reflect/type.go 770 // runtime/type.go 771 const ( 772 tflagUncommon = 1 << 0 773 tflagExtraStar = 1 << 1 774 tflagNamed = 1 << 2 775 ) 776 777 var ( 778 algarray *obj.LSym 779 memhashvarlen *obj.LSym 780 memequalvarlen *obj.LSym 781 ) 782 783 // dcommontype dumps the contents of a reflect.rtype (runtime._type). 
784 func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int { 785 if ot != 0 { 786 Fatalf("dcommontype %d", ot) 787 } 788 789 sizeofAlg := 2 * Widthptr 790 if algarray == nil { 791 algarray = Sysfunc("algarray") 792 } 793 dowidth(t) 794 alg := algtype(t) 795 var algsym *obj.LSym 796 if alg == ASPECIAL || alg == AMEM { 797 algsym = dalgsym(t) 798 } 799 800 sptrWeak := true 801 var sptr *obj.LSym 802 if !t.IsPtr() || t.PtrBase != nil { 803 tptr := types.NewPtr(t) 804 if t.Sym != nil || methods(tptr) != nil { 805 sptrWeak = false 806 } 807 sptr = dtypesym(tptr).Linksym() 808 } 809 810 gcsym, useGCProg, ptrdata := dgcsym(t) 811 812 // ../../../../reflect/type.go:/^type.rtype 813 // actual type structure 814 // type rtype struct { 815 // size uintptr 816 // ptrdata uintptr 817 // hash uint32 818 // tflag tflag 819 // align uint8 820 // fieldAlign uint8 821 // kind uint8 822 // alg *typeAlg 823 // gcdata *byte 824 // str nameOff 825 // ptrToThis typeOff 826 // } 827 ot = duintptr(lsym, ot, uint64(t.Width)) 828 ot = duintptr(lsym, ot, uint64(ptrdata)) 829 ot = duint32(lsym, ot, typehash(t)) 830 831 var tflag uint8 832 if uncommonSize(t) != 0 { 833 tflag |= tflagUncommon 834 } 835 if t.Sym != nil && t.Sym.Name != "" { 836 tflag |= tflagNamed 837 } 838 839 exported := false 840 p := t.LongString() 841 // If we're writing out type T, 842 // we are very likely to write out type *T as well. 843 // Use the string "*T"[1:] for "T", so that the two 844 // share storage. This is a cheap way to reduce the 845 // amount of space taken up by reflect strings. 846 if !strings.HasPrefix(p, "*") { 847 p = "*" + p 848 tflag |= tflagExtraStar 849 if t.Sym != nil { 850 exported = exportname(t.Sym.Name) 851 } 852 } else { 853 if t.Elem() != nil && t.Elem().Sym != nil { 854 exported = exportname(t.Elem().Sym.Name) 855 } 856 } 857 858 ot = duint8(lsym, ot, tflag) 859 860 // runtime (and common sense) expects alignment to be a power of two. 
861 i := int(t.Align) 862 863 if i == 0 { 864 i = 1 865 } 866 if i&(i-1) != 0 { 867 Fatalf("invalid alignment %d for %v", t.Align, t) 868 } 869 ot = duint8(lsym, ot, t.Align) // align 870 ot = duint8(lsym, ot, t.Align) // fieldAlign 871 872 i = kinds[t.Etype] 873 if !types.Haspointers(t) { 874 i |= objabi.KindNoPointers 875 } 876 if isdirectiface(t) { 877 i |= objabi.KindDirectIface 878 } 879 if useGCProg { 880 i |= objabi.KindGCProg 881 } 882 ot = duint8(lsym, ot, uint8(i)) // kind 883 if algsym == nil { 884 ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg) 885 } else { 886 ot = dsymptr(lsym, ot, algsym, 0) 887 } 888 ot = dsymptr(lsym, ot, gcsym, 0) // gcdata 889 890 nsym := dname(p, "", nil, exported) 891 ot = dsymptrOff(lsym, ot, nsym, 0) // str 892 // ptrToThis 893 if sptr == nil { 894 ot = duint32(lsym, ot, 0) 895 } else if sptrWeak { 896 ot = dsymptrWeakOff(lsym, ot, sptr) 897 } else { 898 ot = dsymptrOff(lsym, ot, sptr, 0) 899 } 900 901 return ot 902 } 903 904 func typesymname(t *types.Type) string { 905 name := t.ShortString() 906 // Use a separate symbol name for Noalg types for #17752. 907 if a, bad := algtype1(t); a == ANOEQ && bad.Noalg() { 908 name = "noalg." + name 909 } 910 return name 911 } 912 913 // Fake package for runtime type info (headers) 914 // Don't access directly, use typeLookup below. 915 var ( 916 typepkgmu sync.Mutex // protects typepkg lookups 917 typepkg = types.NewPkg("type", "type") 918 ) 919 920 func typeLookup(name string) *types.Sym { 921 typepkgmu.Lock() 922 s := typepkg.Lookup(name) 923 typepkgmu.Unlock() 924 return s 925 } 926 927 func typesym(t *types.Type) *types.Sym { 928 return typeLookup(typesymname(t)) 929 } 930 931 // tracksym returns the symbol for tracking use of field/method f, assumed 932 // to be a member of struct/interface type t. 933 func tracksym(t *types.Type, f *types.Field) *types.Sym { 934 return trackpkg.Lookup(t.ShortString() + "." 
+ f.Sym.Name) 935 } 936 937 func typesymprefix(prefix string, t *types.Type) *types.Sym { 938 p := prefix + "." + t.ShortString() 939 s := typeLookup(p) 940 941 //print("algsym: %s -> %+S\n", p, s); 942 943 return s 944 } 945 946 func typenamesym(t *types.Type) *types.Sym { 947 if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() { 948 Fatalf("typenamesym %v", t) 949 } 950 s := typesym(t) 951 signatsetmu.Lock() 952 addsignat(t) 953 signatsetmu.Unlock() 954 return s 955 } 956 957 func typename(t *types.Type) *Node { 958 s := typenamesym(t) 959 if s.Def == nil { 960 n := newnamel(src.NoXPos, s) 961 n.Type = types.Types[TUINT8] 962 n.SetClass(PEXTERN) 963 n.SetTypecheck(1) 964 s.Def = asTypesNode(n) 965 } 966 967 n := nod(OADDR, asNode(s.Def), nil) 968 n.Type = types.NewPtr(asNode(s.Def).Type) 969 n.SetAddable(true) 970 n.SetTypecheck(1) 971 return n 972 } 973 974 func itabname(t, itype *types.Type) *Node { 975 if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() { 976 Fatalf("itabname(%v, %v)", t, itype) 977 } 978 s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString()) 979 if s.Def == nil { 980 n := newname(s) 981 n.Type = types.Types[TUINT8] 982 n.SetClass(PEXTERN) 983 n.SetTypecheck(1) 984 s.Def = asTypesNode(n) 985 itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()}) 986 } 987 988 n := nod(OADDR, asNode(s.Def), nil) 989 n.Type = types.NewPtr(asNode(s.Def).Type) 990 n.SetAddable(true) 991 n.SetTypecheck(1) 992 return n 993 } 994 995 // isreflexive reports whether t has a reflexive equality operator. 996 // That is, if x==x for all x of type t. 
997 func isreflexive(t *types.Type) bool { 998 switch t.Etype { 999 case TBOOL, 1000 TINT, 1001 TUINT, 1002 TINT8, 1003 TUINT8, 1004 TINT16, 1005 TUINT16, 1006 TINT32, 1007 TUINT32, 1008 TINT64, 1009 TUINT64, 1010 TUINTPTR, 1011 TPTR32, 1012 TPTR64, 1013 TUNSAFEPTR, 1014 TSTRING, 1015 TCHAN: 1016 return true 1017 1018 case TFLOAT32, 1019 TFLOAT64, 1020 TCOMPLEX64, 1021 TCOMPLEX128, 1022 TINTER: 1023 return false 1024 1025 case TARRAY: 1026 return isreflexive(t.Elem()) 1027 1028 case TSTRUCT: 1029 for _, t1 := range t.Fields().Slice() { 1030 if !isreflexive(t1.Type) { 1031 return false 1032 } 1033 } 1034 return true 1035 1036 default: 1037 Fatalf("bad type for map key: %v", t) 1038 return false 1039 } 1040 } 1041 1042 // needkeyupdate reports whether map updates with t as a key 1043 // need the key to be updated. 1044 func needkeyupdate(t *types.Type) bool { 1045 switch t.Etype { 1046 case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, 1047 TINT64, TUINT64, TUINTPTR, TPTR32, TPTR64, TUNSAFEPTR, TCHAN: 1048 return false 1049 1050 case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0 1051 TINTER, 1052 TSTRING: // strings might have smaller backing stores 1053 return true 1054 1055 case TARRAY: 1056 return needkeyupdate(t.Elem()) 1057 1058 case TSTRUCT: 1059 for _, t1 := range t.Fields().Slice() { 1060 if needkeyupdate(t1.Type) { 1061 return true 1062 } 1063 } 1064 return false 1065 1066 default: 1067 Fatalf("bad type for map key: %v", t) 1068 return true 1069 } 1070 } 1071 1072 // formalType replaces byte and rune aliases with real types. 1073 // They've been separate internally to make error messages 1074 // better, but we have to merge them in the reflect tables. 
// formalType replaces the byte and rune alias types with their canonical
// underlying types (uint8 and int32): run-time type descriptors are shared
// with the underlying type, so aliases never get their own descriptor.
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

// dtypesym returns the symbol naming the run-time type descriptor for t,
// generating and emitting the descriptor data (runtime._type and its
// kind-specific extension) the first time it is asked for a given type.
// The emitted byte layout must stay in sync with the structures in
// ../../../../runtime/type.go (see the per-case references below).
func dtypesym(t *types.Type) *types.Sym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	// Siggen marks that the descriptor for this symbol has already been
	// (or is being) emitted; set it before recursing via element types.
	if s.Siggen() {
		return s
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		tbase = t.Elem()
	}
	// Unnamed types may be emitted by many packages; mark them dup-ok so
	// the linker keeps a single copy.
	dupok := 0
	if tbase.Sym == nil {
		dupok = obj.DUPOK
	}

	if myimportpath == "runtime" && (tbase == types.Types[tbase.Etype] || tbase == types.Bytetype || tbase == types.Runetype || tbase == types.Errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local() {
		return s
	}
	if isforw[tbase.Etype] {
		return s
	}

ok:
	// ot tracks the running byte offset within the descriptor symbol.
	ot := 0
	lsym := s.Linksym()
	switch t.Etype {
	default:
		ot = dcommontype(lsym, ot, t)
		ot = dextratype(lsym, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		// Emit the element type and the corresponding slice type first;
		// arrayType references both.
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dsymptr(lsym, ot, s2.Linksym(), 0)
		ot = duintptr(lsym, ot, uint64(t.NumElem()))
		ot = dextratype(lsym, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dextratype(lsym, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
		ot = dextratype(lsym, ot, t, 0)

	case TFUNC:
		// ../../../../runtime/type.go:/funcType
		// Emit descriptors for all receiver/param/result types first.
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			// Only the last parameter can be variadic; this ends up
			// holding that parameter's flag.
			isddd = t1.Isddd()
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(lsym, ot, t)
		// The receiver, if any, counts as an input.
		inCount := t.Recvs().NumFields() + t.Params().NumFields()
		outCount := t.Results().NumFields()
		if isddd {
			// Variadic-ness is recorded in the top bit of outCount.
			outCount |= 1 << 15
		}
		ot = duint16(lsym, ot, uint16(inCount))
		ot = duint16(lsym, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		dataAdd := (inCount + t.Results().NumFields()) * Widthptr
		ot = dextratype(lsym, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(lsym, ot, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(lsym, ot, tpkg)

		// The method slice's data pointer points just past the
		// interfaceType header and the uncommon section; length and
		// capacity are both n.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))
		dataAdd := imethodSize() * n
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := exportname(a.name)
			var pkg *types.Pkg
			if !exported && a.pkg != tpkg {
				pkg = a.pkg
			}
			nsym := dname(a.name, "", pkg, exported)

			ot = dsymptrOff(lsym, ot, nsym, 0)
			ot = dsymptrOff(lsym, ot, dtypesym(a.type_).Linksym(), 0)
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Val())
		s3 := dtypesym(mapbucket(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dsymptr(lsym, ot, s2.Linksym(), 0)
		ot = dsymptr(lsym, ot, s3.Linksym(), 0)
		ot = dsymptr(lsym, ot, s4.Linksym(), 0)
		// Oversized keys/values are stored indirectly (behind a pointer)
		// in buckets; record the stored size and the indirect flag.
		if t.Key().Width > MAXKEYSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Key().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		if t.Val().Width > MAXVALSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Val().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		ot = duint16(lsym, ot, uint16(mapbucket(t).Width))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
		ot = dextratype(lsym, ot, t, 0)

	case TPTR32, TPTR64:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, ot, t)
			ot = dextratype(lsym, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for _, t1 := range t.Fields().Slice() {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(lsym, ot, t)
		pkg := localpkg
		if t.Sym != nil {
			pkg = t.Sym.Pkg
		} else {
			// Unnamed type. Grab the package from the first field, if any.
			for _, f := range t.Fields().Slice() {
				if f.Embedded != 0 {
					continue
				}
				pkg = f.Sym.Pkg
				break
			}
		}
		ot = dgopkgpath(lsym, ot, pkg)
		// Field slice header (data just past structType + uncommon),
		// with length and capacity both n.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))

		dataAdd := n * structfieldSize()
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, f := range t.Fields().Slice() {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(lsym, ot, pkg, f)
			ot = dsymptr(lsym, ot, dtypesym(f.Type).Linksym(), 0)
			// offsetAnon packs the field offset (shifted left one bit)
			// with the embedded ("anonymous") flag in the low bit.
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(lsym, ot, offsetAnon)
		}
	}

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	lsym.Set(obj.AttrMakeTypelink, keep)

	return s
}

// peekitabs resolves, for each itabEntry, the concrete-type method symbols
// that implement the entry's interface, caching them in entries for later
// use by itabsym.
// for each itabEntry, gather the methods on
// the concrete type that implement the interface
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.entries = methods
	}
}

// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	// Find the cached method symbols for this itab (filled in by peekitabs).
	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	// (see the itab struct comment in dumptabs: three pointers —
	// inter, _type, link — then 8 bytes of hash/flags precede fun).
	methodnum := int((offset - 3*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}

// addsignat records t as needing a run-time type descriptor,
// to be emitted later by dumpsignats.
func addsignat(t *types.Type) {
	signatset[t] = struct{}{}
}

// addsignats queues the types of all type declarations in dcls.
func addsignats(dcls []*Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}
}

// dumpsignats emits the run-time type descriptors for everything queued
// in signatset (and, for named types, their pointer types as well).
func dumpsignats() {
	// Process signatset. Use a loop, as dtypesym adds
	// entries to signatset while it is being processed.
	signats := make([]typeAndStr, len(signatset))
	for len(signatset) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for t := range signatset {
			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
			delete(signatset, t)
		}
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			if t.Sym != nil {
				dtypesym(types.NewPtr(t))
			}
		}
	}
}

// dumptabs emits the collected itab symbols (and their go.itablink
// entries), plus the plugin ptab tables when compiling package main.
func dumptabs() {
	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   link   *itab
		//   hash   uint32
		//   bad    bool
		//   inhash bool
		//   unused [2]byte
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.lsym, 0, dtypesym(i.itype).Linksym(), 0)
		o = dsymptr(i.lsym, o, dtypesym(i.t).Linksym(), 0)
		o += Widthptr                          // skip link field
		o = duint32(i.lsym, o, typehash(i.t))  // copy of type hash
		o += 4                                 // skip bad/inhash/unused fields
		o += len(imethods(i.itype)) * Widthptr // skip fun method pointers
		// at runtime the itab will contain pointers to types, other itabs and
		// method functions. None are allocated on heap, so we can use obj.NOPTR.
		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.NOPTR))

		// Emit a go.itablink entry pointing at the itab so the linker
		// can build the runtime's itab table.
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
		dsymptr(ilink, 0, i.lsym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs")
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//   name nameOff
			//   typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOff(s, ot, nsym, 0)
			ot = dsymptrOff(s, ot, dtypesym(p.t).Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports")
		for _, p := range ptabs {
			ot = dsymptr(s, ot, p.s.Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))
	}
}

// dumpimportstrings emits path strings for every imported package.
func dumpimportstrings() {
	// generate import strings for imported packages
	for _, p := range types.ImportedPkgList() {
		dimportpath(p)
	}
}

// dumpbasictypes emits descriptors for the predeclared basic types.
func dumpbasictypes() {
	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in object files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(types.NewPkg("main", ""))
	}
}

// typeAndStr pairs a type with its precomputed name strings,
// used to sort types deterministically in dumpsignats.
type typeAndStr struct {
	t       *types.Type
	short   string // typesymname(t): primary sort key
	regular string // t.String(): tie-breaker
}

type typesByString []typeAndStr

func (a typesByString) Len() int { return len(a) }
func (a typesByString) Less(i, j int) bool {
	if a[i].short != a[j].short {
		return a[i].short < a[j].short
	}
	// When the only difference between the types is whether
	// they refer to byte or uint8, such as **byte vs **uint8,
	// the types' ShortStrings can be identical.
	// To preserve deterministic sort ordering, sort these by String().
	return a[i].regular < a[j].regular
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// dalgsym returns (emitting if necessary) the algorithm-table symbol for t,
// containing pointers to the hash and equality closures used by the runtime.
// The emitted layout must match ../../../../runtime/alg.go:/typeAlg.
func dalgsym(t *types.Type) *obj.LSym {
	var lsym *obj.LSym
	var hashfunc *obj.LSym
	var eqfunc *obj.LSym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s := typeLookup(p)
		lsym = s.Linksym()
		if s.AlgGen() {
			return lsym
		}
		s.SetAlgGen(true)

		// Lazily resolve the shared runtime memhash/memequal helpers.
		if memhashvarlen == nil {
			memhashvarlen = Sysfunc("memhash_varlen")
			memequalvarlen = Sysfunc("memequal_varlen")
		}

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = typeLookup(p).Linksym()

		ot := 0
		ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
		ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = typeLookup(p).Linksym()

		ot = 0
		ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
		ot = duintptr(eqfunc, ot, uint64(t.Width))
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s := typesymprefix(".alg", t)
		lsym = s.Linksym()

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t).Linksym()
		eqfunc = typesymprefix(".eqfunc", t).Linksym()

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash.Linksym(), 0)
		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq.Linksym(), 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(lsym, ot, hashfunc, 0)
	ot = dsymptr(lsym, ot, eqfunc, 0)
	ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
	return lsym
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program,
// which can express repetition compactly. In either form, the
// information is used by the runtime to initialize the heap bitmap,
// and for large types (like 128 or more words), they are roughly the
// same speed. GC programs are never much larger and often more
// compact. (If large arrays are involved, they can be arbitrarily
// more compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	// Small-enough pointer sections get a plain bitmap; larger ones a
	// GC program (ptrdata/Widthptr is the pointer-word count, compared
	// against the bitmap's bit capacity).
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		lsym = dgcptrmask(t)
		return
	}

	useGCProg = true
	lsym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *obj.LSym {
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	// Masks are content-addressed so identical bitmaps share one symbol.
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Runtimepkg.Lookup(p)
	lsym := sym.Linksym()
	if !sym.Uniq() {
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(lsym, i, x)
		}
		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return lsym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	vec := bvalloc(8 * int32(len(ptrmask)))
	xoffset := int64(0)
	onebitwalktype1(t, &xoffset, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	lsym := typesymprefix(".gcprog", t).Linksym()
	var p GCProg
	p.init(lsym)
	p.emit(t, 0)
	// BitIndex is in pointer words; convert to a byte size.
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	// Sanity-check the program against the type's computed layout.
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return lsym, offset
}

// GCProg writes a GC program for one type into a data symbol,
// delegating the program encoding to gcprog.Writer.
type GCProg struct {
	lsym   *obj.LSym     // symbol receiving the program bytes
	symoff int           // current write offset within lsym
	w      gcprog.Writer // encodes ptr/repeat ops into bytes
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into lsym, reserving the
// leading length word.
func (p *GCProg) init(lsym *obj.LSym) {
	p.lsym = lsym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one encoded program byte to the symbol; it is the
// sink passed to gcprog.Writer in init.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.lsym, p.symoff, x)
}

// end finishes the program, backpatches the length word written at
// offset 0, and emits the completed symbol.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.lsym, 0, uint32(p.symoff-4))
	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
	}
}

// emit writes the pointer layout of t, located at the given byte offset
// within the enclosing object, into the program. Offsets passed to the
// writer are in pointer-word units.
func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	// A pointer-word-sized type with pointers is a single pointer.
	if t.Width == int64(Widthptr) {
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		// Only the data pointer (first word) of a string is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Both words of an interface are pointers.
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		// Only the data pointer (first word) of a slice is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit one element, pad to its full width, then repeat it
		// count-1 more times via the program's repeat op.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map value too big %d", size)
	}
	// Track the largest size requested so the symbol can be sized later.
	if zerosize < size {
		zerosize = size
	}
	s := mappkg.Lookup("zero")
	if s.Def == nil {
		// First use: declare the backing symbol as an external byte.
		x := newname(s)
		x.Type = types.Types[TUINT8]
		x.SetClass(PEXTERN)
		x.SetTypecheck(1)
		s.Def = asTypesNode(x)
	}
	z := nod(OADDR, asNode(s.Def), nil)
	z.Type = types.NewPtr(types.Types[TUINT8])
	z.SetAddable(true)
	z.SetTypecheck(1)
	return z
}