// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
)

// itabEntry records one (concrete type, interface type) pair whose itab
// must be emitted, along with the symbol that will hold the itab data.
type itabEntry struct {
	t, itype *types.Type
	lsym     *obj.LSym // symbol of the itab itself

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in by peekitabs
	entries []*obj.LSym
}

// ptabEntry pairs an exported symbol with its type.
type ptabEntry struct {
	s *types.Sym
	t *types.Type
}

// runtime interface and reflection data structures
var (
	signatsetmu sync.Mutex // protects signatset
	signatset   = make(map[*types.Type]struct{})

	itabs []itabEntry
	ptabs []ptabEntry
)

// Sig records one method signature while laying out reflection metadata.
type Sig struct {
	name   string
	pkg    *types.Pkg  // nil for exported names; owning package otherwise (see methods/imethods)
	isym   *types.Sym  // symbol for the method when called through the interface form (see methods)
	tsym   *types.Sym  // symbol for the method on the concrete type
	type_  *types.Type // func type with receiver as first argument
	mtype  *types.Type // func type without receiver
	offset int32
}

// siglt sorts method signatures by name, then package path.
// A nil pkg (exported name) sorts before any non-nil pkg.
func siglt(a, b *Sig) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.pkg == b.pkg {
		return false
	}
	if a.pkg == nil {
		return true
	}
	if b.pkg == nil {
		return false
	}
	return a.pkg.Path < b.pkg.Path
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
// These must stay in sync with the constants of the same names in
// ../../../../runtime/hashmap.go.
const (
	BUCKETSIZE = 8   // entries per bucket
	MAXKEYSIZE = 128 // keys larger than this are stored indirectly
	MAXVALSIZE = 128 // values larger than this are stored indirectly
)

func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
func imethodSize() int     { return 4 + 4 }        // Sizeof(runtime.imethod{})

// uncommonSize returns the size in bytes of the uncommontype header for t,
// or 0 when t needs no uncommontype (unnamed and methodless).
func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
	if t.Sym == nil && len(methods(t)) == 0 {
		return 0
	}
	return 4 + 2 + 2 + 4 + 4
}

// makefield makes a synthetic struct field with the given name and type.
// The Sym is looked up in the nil package; these structs are
// compiler-internal and never user-visible.
func makefield(name string, t *types.Type) *types.Field {
	f := types.NewField()
	f.Type = t
	f.Sym = (*types.Pkg)(nil).Lookup(name)
	return f
}

func mapbucket(t *types.Type) *types.Type {
	// Memoize on the map type.
	if t.MapType().Bucket != nil {
		return t.MapType().Bucket
	}

	bucket := types.New(TSTRUCT)
	keytype := t.Key()
	valtype := t.Val()
	dowidth(keytype)
	dowidth(valtype)
	// Large keys and values are stored indirectly: the bucket holds a pointer.
	if keytype.Width > MAXKEYSIZE {
		keytype = types.NewPtr(keytype)
	}
	if valtype.Width > MAXVALSIZE {
		valtype = types.NewPtr(valtype)
	}

	field := make([]*types.Field, 0, 5)

	// The first field is: uint8 topbits[BUCKETSIZE].
	arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
	field = append(field, makefield("topbits", arr))

	arr = types.NewArray(keytype, BUCKETSIZE)
	arr.SetNoalg(true)
	field = append(field, makefield("keys", arr))

	arr = types.NewArray(valtype, BUCKETSIZE)
	arr.SetNoalg(true)
	field = append(field, makefield("values", arr))

	// Make sure the overflow pointer is the last memory in the struct,
	// because the runtime assumes it can use size-ptrSize as the
	// offset of the overflow pointer. We double-check that property
	// below once the offsets and size are computed.
	//
	// BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
	// On 32-bit systems, the max alignment is 32-bit, and the
	// overflow pointer will add another 32-bit field, and the struct
	// will end with no padding.
	// On 64-bit systems, the max alignment is 64-bit, and the
	// overflow pointer will add another 64-bit field, and the struct
	// will end with no padding.
	// On nacl/amd64p32, however, the max alignment is 64-bit,
	// but the overflow pointer will add only a 32-bit field,
	// so if the struct needs 64-bit padding (because a key or value does)
	// then it would end with an extra 32-bit padding field.
	// Preempt that by emitting the padding here.
	if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr {
		field = append(field, makefield("pad", types.Types[TUINTPTR]))
	}

	// If keys and values have no pointers, the map implementation
	// can keep a list of overflow pointers on the side so that
	// buckets can be marked as having no pointers.
	// Arrange for the bucket to have no pointers by changing
	// the type of the overflow field to uintptr in this case.
	// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
	otyp := types.NewPtr(bucket)
	if !types.Haspointers(t.Val()) && !types.Haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
		otyp = types.Types[TUINTPTR]
	}
	ovf := makefield("overflow", otyp)
	field = append(field, ovf)

	// link up fields
	bucket.SetNoalg(true)
	bucket.SetLocal(t.Local())
	bucket.SetFields(field[:])
	dowidth(bucket)

	// Double-check that overflow field is final memory in struct,
	// with no padding at end. See comment above.
	if ovf.Offset != bucket.Width-int64(Widthptr) {
		Fatalf("bad math in mapbucket for %v", t)
	}

	t.MapType().Bucket = bucket

	bucket.StructType().Map = t
	return bucket
}

// Builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
func hmap(t *types.Type) *types.Type {
	// Memoize on the map type.
	if t.MapType().Hmap != nil {
		return t.MapType().Hmap
	}

	// Field names and order must mirror the runtime's hmap struct.
	bucket := mapbucket(t)
	fields := []*types.Field{
		makefield("count", types.Types[TINT]),
		makefield("flags", types.Types[TUINT8]),
		makefield("B", types.Types[TUINT8]),
		makefield("noverflow", types.Types[TUINT16]),
		makefield("hash0", types.Types[TUINT32]),
		makefield("buckets", types.NewPtr(bucket)),
		makefield("oldbuckets", types.NewPtr(bucket)),
		makefield("nevacuate", types.Types[TUINTPTR]),
		makefield("overflow", types.Types[TUNSAFEPTR]),
	}

	h := types.New(TSTRUCT)
	h.SetNoalg(true)
	h.SetLocal(t.Local())
	h.SetFields(fields)
	dowidth(h)
	t.MapType().Hmap = h
	h.StructType().Map = t
	return h
}

// hiter builds (and memoizes) the struct type describing a map iterator
// over map type t.
func hiter(t *types.Type) *types.Type {
	if t.MapType().Hiter != nil {
		return t.MapType().Hiter
	}

	// build a struct:
	// hiter {
	//    key *Key
	//    val *Value
	//    t *MapType
	//    h *Hmap
	//    buckets *Bucket
	//    bptr *Bucket
	//    overflow0 unsafe.Pointer
	//    overflow1 unsafe.Pointer
	//    startBucket uintptr
	//    stuff uintptr
	//    bucket uintptr
	//    checkBucket uintptr
	// }
	// must match ../../../../runtime/hashmap.go:hiter.
	var field [12]*types.Field
	field[0] = makefield("key", types.NewPtr(t.Key()))
	field[1] = makefield("val", types.NewPtr(t.Val()))
	field[2] = makefield("t", types.NewPtr(types.Types[TUINT8]))
	field[3] = makefield("h", types.NewPtr(hmap(t)))
	field[4] = makefield("buckets", types.NewPtr(mapbucket(t)))
	field[5] = makefield("bptr", types.NewPtr(mapbucket(t)))
	field[6] = makefield("overflow0", types.Types[TUNSAFEPTR])
	field[7] = makefield("overflow1", types.Types[TUNSAFEPTR])
	field[8] = makefield("startBucket", types.Types[TUINTPTR])
	field[9] = makefield("stuff", types.Types[TUINTPTR]) // offset+wrapped+B+I
	field[10] = makefield("bucket", types.Types[TUINTPTR])
	field[11] = makefield("checkBucket", types.Types[TUINTPTR])

	// build iterator struct holding the above fields
	i := types.New(TSTRUCT)
	i.SetNoalg(true)
	i.SetFields(field[:])
	dowidth(i)
	// All 12 fields are pointer-sized, so the total must be exactly
	// 12 words; anything else means the layout above drifted from the runtime.
	if i.Width != int64(12*Widthptr) {
		Fatalf("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
	}
	t.MapType().Hiter = i
	i.StructType().Map = t
	return i
}

// f is method type, with receiver.
// return function type, receiver as first argument (or not).
func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
	var in []*Node
	if receiver != nil {
		d := nod(ODCLFIELD, nil, nil)
		d.Type = receiver
		in = append(in, d)
	}

	var d *Node
	for _, t := range f.Params().Fields().Slice() {
		d = nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		d.SetIsddd(t.Isddd()) // preserve variadic-ness of the last parameter
		in = append(in, d)
	}

	var out []*Node
	for _, t := range f.Results().Fields().Slice() {
		d = nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		out = append(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname() != nil {
		// Link to name of original method function.
		t.SetNname(f.Nname())
	}

	return t
}

// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *types.Type) []*Sig {
	// method type
	mt := methtype(t)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = types.NewPtr(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var ms []*Sig
	for _, f := range mt.AllMethods().Slice() {
		if f.Type.Etype != TFUNC || f.Type.Recv() == nil {
			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
		}
		// NOTE(review): this second check is unreachable — the condition
		// above already calls Fatalf when f.Type.Recv() == nil.
		if f.Type.Recv() == nil {
			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Nointerface() {
			continue
		}

		method := f.Sym
		if method == nil {
			continue
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		this := f.Type.Recv().Type

		if this.IsPtr() && this.Elem() == t {
			continue
		}
		if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) {
			continue
		}

		var sig Sig
		ms = append(ms, &sig)

		sig.name = method.Name
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatalf("methods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.isym = methodsym(method, it, true)
		sig.tsym = methodsym(method, t, false)
		sig.type_ = methodfunc(f.Type, t)
		sig.mtype = methodfunc(f.Type, nil)

		// Generate the interface-call wrapper once per symbol.
		if !sig.isym.Siggen() {
			sig.isym.SetSiggen(true)
			if !eqtype(this, it) || this.Width < int64(Widthptr) {
				compiling_wrappers = 1
				genwrapper(it, f, sig.isym, 1)
				compiling_wrappers = 0
			}
		}

		if !sig.tsym.Siggen() {
			sig.tsym.SetSiggen(true)
			if !eqtype(this, t) {
				compiling_wrappers = 1
				genwrapper(t, f, sig.tsym, 0)
				compiling_wrappers = 0
			}
		}
	}

	obj.SortSlice(ms, func(i, j int) bool { return siglt(ms[i], ms[j]) })
	return ms
}

// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *types.Type) []*Sig {
	var methods []*Sig
	for _, f := range t.Fields().Slice() {
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		method := f.Sym
		var sig = Sig{
			name: method.Name,
		}
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatalf("imethods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.mtype = f.Type
		sig.offset = 0
		sig.type_ = methodfunc(f.Type, nil)

		// Interface fields are expected to arrive already sorted
		// (sortinter); verify rather than re-sort.
		if n := len(methods); n > 0 {
			last := methods[n-1]
			if !(siglt(last, &sig)) {
				Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name)
			}
		}
		methods = append(methods, &sig)

		// Compiler can only refer to wrappers for non-blank methods.
		if method.IsBlank() {
			continue
		}

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym := methodsym(method, t, false)
		if !isym.Siggen() {
			isym.SetSiggen(true)
			genwrapper(t, f, isym, 0)
		}
	}

	return methods
}

// dimportpath emits (once per package) the symbol holding p's import path
// and records it in p.Pathsym.
func dimportpath(p *types.Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	var str string
	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		str = myimportpath
	} else {
		str = p.Path
	}

	s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
	ot := dnameData(s, 0, str, "", nil, false)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	p.Pathsym = s
}

// dgopkgpath writes a pointer to pkg's import-path symbol into s at
// offset ot, and returns the new offset.
func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duintptr(s, ot, 0)
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptr(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptr(s, ot, pkg.Pathsym, 0)
}

// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
	if pkg == nil {
		return duint32(s, ot, 0)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptrOff(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptrOff(s, ot, pkg.Pathsym, 0)
}

// isExportedField reports whether a struct field is exported.
// It also returns the package to use for PkgPath for an unexported field.
func isExportedField(ft *types.Field) (bool, *types.Pkg) {
	if ft.Sym != nil && ft.Embedded == 0 {
		// Named (non-embedded) field: exportedness comes from its own name.
		return exportname(ft.Sym.Name), ft.Sym.Pkg
	}
	// Embedded field: exportedness comes from the embedded type's name.
	if ft.Type.Sym != nil &&
		(ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) {
		return false, ft.Type.Sym.Pkg
	}
	return true, nil
}

// dnameField dumps a reflect.name for a struct field.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
	var name string
	if ft.Sym != nil {
		name = ft.Sym.Name
	}
	isExported, fpkg := isExportedField(ft)
	// Omit the package when it matches the struct's own package.
	if isExported || fpkg == spkg {
		fpkg = nil
	}
	nsym := dname(name, ft.Note, fpkg, isExported)
	return dsymptr(lsym, ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
	// Name and tag lengths are each stored in 16 bits.
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	// bits layout: bit 0 = exported, bit 1 = has tag, bit 2 = has pkgPath.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	// Big-endian 16-bit name length, then the name bytes.
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	if pkg != nil {
		ot = dgopkgpathOff(s, ot, pkg)
	}

	return ot
}

var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			if exported {
				sname += name + "." + tag
			} else {
				sname += name + "-" + tag
			}
		}
	} else {
		// Names with a package path get a unique, non-shared symbol,
		// numbered per compilation via dnameCount.
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := Ctxt.Lookup(sname)
	// Already emitted (shared name): reuse it.
	if len(s.P) > 0 {
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
	m := methods(t)
	// Unnamed, methodless types carry no uncommontype at all.
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	// The uncommontype must start pointer-aligned.
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %v", t)
	}

	// Make sure the *rtype for each method's signature is emitted.
	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOff(lsym, ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	// mcount is stored as uint16, dataAdd as uint32; reject overflow.
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	}
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)
	}

	ot = duint16(lsym, ot, uint16(mcount))
	ot = duint16(lsym, ot, 0)
	ot = duint32(lsym, ot, uint32(dataAdd))
	ot = duint32(lsym, ot, 0)
	return ot
}

// typePkg returns the package of t's name symbol (falling back to the
// element type's symbol for arrays, slices, pointers, and channels),
// or nil for predeclared types and the error type.
func typePkg(t *types.Type) *types.Pkg {
	tsym := t.Sym
	if tsym == nil {
		switch t.Etype {
		case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := exportname(a.name)
		// Record the package only for unexported names from a
		// different package than the type's own.
		var pkg *types.Pkg
		if !exported && a.pkg != typePkg(t) {
			pkg = a.pkg
		}
		nsym := dname(a.name, "", pkg, exported)

		ot = dsymptrOff(lsym, ot, nsym, 0)
		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype).Linksym())
		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
	}
	return ot
}

// dmethodptrOff writes a 4-byte placeholder at s+ot and attaches an
// R_METHODOFF relocation to x, so the linker can fill in the offset
// (and can also prune unreferenced methods).
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
	duint32(s, ot, 0)
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = objabi.R_METHODOFF
	return ot + 4
}

// kinds maps compiler type kinds (Etype) to the runtime's reflection
// kind constants.
var kinds = []int{
	TINT:        objabi.KindInt,
	TUINT:       objabi.KindUint,
	TINT8:       objabi.KindInt8,
	TUINT8:      objabi.KindUint8,
	TINT16:      objabi.KindInt16,
	TUINT16:     objabi.KindUint16,
	TINT32:      objabi.KindInt32,
	TUINT32:     objabi.KindUint32,
	TINT64:      objabi.KindInt64,
	TUINT64:     objabi.KindUint64,
	TUINTPTR:    objabi.KindUintptr,
	TFLOAT32:    objabi.KindFloat32,
	TFLOAT64:    objabi.KindFloat64,
	TBOOL:       objabi.KindBool,
	TSTRING:     objabi.KindString,
	TPTR32:      objabi.KindPtr,
	TPTR64:      objabi.KindPtr,
	TSTRUCT:     objabi.KindStruct,
	TINTER:      objabi.KindInterface,
	TCHAN:       objabi.KindChan,
	TMAP:        objabi.KindMap,
	TARRAY:      objabi.KindArray,
	TSLICE:      objabi.KindSlice,
	TFUNC:       objabi.KindFunc,
	TCOMPLEX64:  objabi.KindComplex64,
	TCOMPLEX128: objabi.KindComplex128,
	TUNSAFEPTR:  objabi.KindUnsafePointer,
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
	if !types.Haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		// Single pointer word.
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		// Haspointers(t) guarantees at least one such field exists,
		// so lastPtrField is non-nil below.
		var lastPtrField *types.Field
		for _, t1 := range t.Fields().Slice() {
			if types.Haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
	tflagNamed     = 1 << 2
)

// Lazily initialized symbols referring to runtime hashing/equality
// helpers. algarray is resolved in dcommontype; the varlen symbols are
// presumably resolved by dalgsym (not visible here) — confirm there.
var (
	algarray       *obj.LSym
	memhashvarlen  *obj.LSym
	memequalvarlen *obj.LSym
)

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int {
	// The rtype header is always written at the start of the symbol.
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if algarray == nil {
		algarray = sysfunc("algarray")
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *obj.LSym
	if alg == ASPECIAL || alg == AMEM {
		algsym = dalgsym(t)
	}

	// Emit a reference to *T for the ptrToThis field. The reference is
	// weak unless *T is certain to be emitted anyway (t is named, or *T
	// has methods).
	sptrWeak := true
	var sptr *obj.LSym
	if !t.IsPtr() || t.PtrBase != nil {
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
			sptrWeak = false
		}
		sptr = dtypesym(tptr).Linksym()
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	// type rtype struct {
	//	size          uintptr
	//	ptrdata       uintptr
	//	hash          uint32
	//	tflag         tflag
	//	align         uint8
	//	fieldAlign    uint8
	//	kind          uint8
	//	alg           *typeAlg
	//	gcdata        *byte
	//	str           nameOff
	//	ptrToThis     typeOff
	// }
	ot = duintptr(lsym, ot, uint64(t.Width))
	ot = duintptr(lsym, ot, uint64(ptrdata))
	ot = duint32(lsym, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}

	exported := false
	p := t.LongString()
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = exportname(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = exportname(t.Elem().Sym.Name)
		}
	}

	ot = duint8(lsym, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(lsym, ot, t.Align) // align
	ot = duint8(lsym, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if !types.Haspointers(t) {
		i |= objabi.KindNoPointers
	}
	if isdirectiface(t) {
		i |= objabi.KindDirectIface
	}
	if useGCProg {
		i |= objabi.KindGCProg
	}
	ot = duint8(lsym, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(lsym, ot, algsym, 0)
	}
	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOff(lsym, ot, nsym, 0) // str
	// ptrToThis
	if sptr == nil {
		ot = duint32(lsym, ot, 0)
	} else if sptrWeak {
		ot = dsymptrWeakOff(lsym, ot, sptr)
	} else {
		ot = dsymptrOff(lsym, ot, sptr, 0)
	}

	return ot
}

// typesymname returns the name under which t's type descriptor symbol
// is stored in the type package.
func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	if a, bad := algtype1(t); a == ANOEQ && bad.Noalg() {
		name = "noalg." + name
	}
	return name
}

// Fake package for runtime type info (headers)
// Don't access directly, use typeLookup below.
var (
	typepkgmu sync.Mutex // protects typepkg lookups
	typepkg   = types.NewPkg("type", "type")
)

// typeLookup looks name up in typepkg while holding typepkgmu.
func typeLookup(name string) *types.Sym {
	typepkgmu.Lock()
	s := typepkg.Lookup(name)
	typepkgmu.Unlock()
	return s
}

// typesym returns the symbol for t's runtime type descriptor.
func typesym(t *types.Type) *types.Sym {
	return typeLookup(typesymname(t))
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym {
	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
}

// typesymprefix returns the symbol "prefix.<t's short name>" in the
// type package (used for per-type helper symbols).
func typesymprefix(prefix string, t *types.Type) *types.Sym {
	p := prefix + "." + t.ShortString()
	s := typeLookup(p)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym registers t for signature generation (addsignat, guarded
// by signatsetmu) and returns its type descriptor symbol.
func typenamesym(t *types.Type) *types.Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typenamesym %v", t)
	}
	s := typesym(t)
	signatsetmu.Lock()
	addsignat(t)
	signatsetmu.Unlock()
	return s
}

// typename returns an OADDR node pointing at t's type descriptor symbol,
// creating the symbol's defining name node on first use.
func typename(t *types.Type) *Node {
	s := typenamesym(t)
	if s.Def == nil {
		n := newnamel(src.NoXPos, s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// itabname returns an OADDR node pointing at the itab symbol for the
// pair (concrete type t, interface type itype), queueing the itab for
// emission on first use.
func itabname(t, itype *types.Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
		Fatalf("itabname(%v, %v)", t, itype)
	}
	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
	if s.Def == nil {
		n := newname(s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
		itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
995 func isreflexive(t *types.Type) bool { 996 switch t.Etype { 997 case TBOOL, 998 TINT, 999 TUINT, 1000 TINT8, 1001 TUINT8, 1002 TINT16, 1003 TUINT16, 1004 TINT32, 1005 TUINT32, 1006 TINT64, 1007 TUINT64, 1008 TUINTPTR, 1009 TPTR32, 1010 TPTR64, 1011 TUNSAFEPTR, 1012 TSTRING, 1013 TCHAN: 1014 return true 1015 1016 case TFLOAT32, 1017 TFLOAT64, 1018 TCOMPLEX64, 1019 TCOMPLEX128, 1020 TINTER: 1021 return false 1022 1023 case TARRAY: 1024 return isreflexive(t.Elem()) 1025 1026 case TSTRUCT: 1027 for _, t1 := range t.Fields().Slice() { 1028 if !isreflexive(t1.Type) { 1029 return false 1030 } 1031 } 1032 return true 1033 1034 default: 1035 Fatalf("bad type for map key: %v", t) 1036 return false 1037 } 1038 } 1039 1040 // needkeyupdate reports whether map updates with t as a key 1041 // need the key to be updated. 1042 func needkeyupdate(t *types.Type) bool { 1043 switch t.Etype { 1044 case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, 1045 TINT64, TUINT64, TUINTPTR, TPTR32, TPTR64, TUNSAFEPTR, TCHAN: 1046 return false 1047 1048 case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0 1049 TINTER, 1050 TSTRING: // strings might have smaller backing stores 1051 return true 1052 1053 case TARRAY: 1054 return needkeyupdate(t.Elem()) 1055 1056 case TSTRUCT: 1057 for _, t1 := range t.Fields().Slice() { 1058 if needkeyupdate(t1.Type) { 1059 return true 1060 } 1061 } 1062 return false 1063 1064 default: 1065 Fatalf("bad type for map key: %v", t) 1066 return true 1067 } 1068 } 1069 1070 // formalType replaces byte and rune aliases with real types. 1071 // They've been separate internally to make error messages 1072 // better, but we have to merge them in the reflect tables. 
// formalType replaces the byte and rune alias types with their real
// underlying types (uint8 and int32) so only one descriptor is emitted.
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

// dtypesym returns the symbol holding the runtime/reflect type descriptor
// for t, emitting the descriptor data the first time it is requested.
// The byte layout written here must stay in sync with the runtime's type
// structures (see the ../../../../runtime/type.go references below).
func dtypesym(t *types.Type) *types.Sym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	if s.Siggen() {
		// Descriptor already generated; emit at most once per symbol.
		return s
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		// Unnamed types may be emitted by several packages;
		// let the linker deduplicate the copies.
		dupok = obj.DUPOK
	}

	if myimportpath == "runtime" && (tbase == types.Types[tbase.Etype] || tbase == types.Bytetype || tbase == types.Runetype || tbase == types.Errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local() {
		return s
	}
	if isforw[tbase.Etype] {
		// Forward-declared (incomplete) type: nothing to emit yet.
		return s
	}

ok:
	ot := 0
	lsym := s.Linksym()
	switch t.Etype {
	default:
		ot = dcommontype(lsym, ot, t)
		ot = dextratype(lsym, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dsymptr(lsym, ot, s2.Linksym(), 0)
		ot = duintptr(lsym, ot, uint64(t.NumElem()))
		ot = dextratype(lsym, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dextratype(lsym, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
		ot = dextratype(lsym, ot, t, 0)

	case TFUNC:
		// Emit descriptors for every receiver, parameter, and result type.
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.Isddd()
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(lsym, ot, t)
		inCount := t.Recvs().NumFields() + t.Params().NumFields()
		outCount := t.Results().NumFields()
		if isddd {
			// High bit of outCount records that the function is variadic.
			outCount |= 1 << 15
		}
		ot = duint16(lsym, ot, uint16(inCount))
		ot = duint16(lsym, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		dataAdd := (inCount + t.Results().NumFields()) * Widthptr
		ot = dextratype(lsym, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(lsym, ot, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(lsym, ot, tpkg)

		// Slice header (data pointer, len, cap) for the method array
		// that follows the uncommon section.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))
		dataAdd := imethodSize() * n
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := exportname(a.name)
			var pkg *types.Pkg
			if !exported && a.pkg != tpkg {
				pkg = a.pkg
			}
			nsym := dname(a.name, "", pkg, exported)

			ot = dsymptrOff(lsym, ot, nsym, 0)
			ot = dsymptrOff(lsym, ot, dtypesym(a.type_).Linksym(), 0)
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Val())
		s3 := dtypesym(mapbucket(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dsymptr(lsym, ot, s2.Linksym(), 0)
		ot = dsymptr(lsym, ot, s3.Linksym(), 0)
		ot = dsymptr(lsym, ot, s4.Linksym(), 0)
		if t.Key().Width > MAXKEYSIZE {
			// Oversized keys are stored indirectly (as pointers) in buckets.
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Key().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		if t.Val().Width > MAXVALSIZE {
			// Oversized values are likewise stored indirectly.
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Val().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		ot = duint16(lsym, ot, uint16(mapbucket(t).Width))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
		ot = dextratype(lsym, ot, t, 0)

	case TPTR32, TPTR64:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, ot, t)
			ot = dextratype(lsym, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for _, t1 := range t.Fields().Slice() {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(lsym, ot, t)
		pkg := localpkg
		if t.Sym != nil {
			pkg = t.Sym.Pkg
		} else {
			// Unnamed type. Grab the package from the first field, if any.
			for _, f := range t.Fields().Slice() {
				if f.Embedded != 0 {
					continue
				}
				pkg = f.Sym.Pkg
				break
			}
		}
		ot = dgopkgpath(lsym, ot, pkg)
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))

		dataAdd := n * structfieldSize()
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, f := range t.Fields().Slice() {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(lsym, ot, pkg, f)
			ot = dsymptr(lsym, ot, dtypesym(f.Type).Linksym(), 0)
			// Field offset is packed with the embedded flag in the low bit.
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(lsym, ot, offsetAnon)
		}
	}

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	lsym.Set(obj.AttrMakeTypelink, keep)

	return s
}

// for each itabEntry, gather the methods on
// the concrete type that implement the interface
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.entries = methods
	}
}

// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
// itabsym returns the method symbol stored at the given byte offset
// inside the itab symbol it, or nil if no entry is known for it or the
// offset is past the recorded methods.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	// Look up the method entries recorded for this itab by peekitabs.
	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}

// addsignat records t as needing a runtime type descriptor.
func addsignat(t *types.Type) {
	signatset[t] = struct{}{}
}

// addsignats records the type of every OTYPE declaration in dcls.
func addsignats(dcls []*Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}
}

// dumpsignats emits type descriptors for every type in signatset,
// draining the set repeatedly until no new entries appear.
func dumpsignats() {
	// Process signatset. Use a loop, as dtypesym adds
	// entries to signatset while it is being processed.
	signats := make([]typeAndStr, len(signatset))
	for len(signatset) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for t := range signatset {
			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
			delete(signatset, t)
		}
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			// For named types, also emit the pointer-to-T descriptor.
			if t.Sym != nil {
				dtypesym(types.NewPtr(t))
			}
		}
	}
}

// dumptabs writes out the itab symbols and, for package main, the
// plugin ptab tables accumulated during compilation.
func dumptabs() {
	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter *interfacetype
		//   _type *_type
		//   hash  uint32
		//   _     [4]byte
		//   fun   [1]uintptr // variable sized
		// }
		o := dsymptr(i.lsym, 0, dtypesym(i.itype).Linksym(), 0)
		o = dsymptr(i.lsym, o, dtypesym(i.t).Linksym(), 0)
		o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
		o += 4                                // skip unused field
		for _, fn := range genfun(i.t, i.itype) {
			o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
		}
		// Nothing writes static itabs, so they are read only.
		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
		// Emit an itablink entry pointing at the itab so the linker
		// can build the itab table.
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
		dsymptr(ilink, 0, i.lsym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs")
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//   name nameOff
			//   typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOff(s, ot, nsym, 0)
			ot = dsymptrOff(s, ot, dtypesym(p.t).Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports")
		for _, p := range ptabs {
			ot = dsymptr(s, ot, p.s.Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))
	}
}

// dumpimportstrings emits the import path string for every imported package.
func dumpimportstrings() {
	// generate import strings for imported packages
	for _, p := range types.ImportedPkgList() {
		dimportpath(p)
	}
}

// dumpbasictypes emits descriptors for the predeclared basic types and
// the implicitly imported package paths, but only when compiling runtime.
func dumpbasictypes() {
	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in object files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(types.NewPkg("main", ""))
	}
}

// typeAndStr pairs a type with precomputed sort keys
// (see dumpsignats, which fills these in).
type typeAndStr struct {
	t       *types.Type
	short   string // typesymname(t)
	regular string // t.String()
}

// typesByString sorts by the short (type symbol) name, breaking ties
// with the regular String form, for reproducible builds.
type typesByString []typeAndStr

func (a typesByString) Len() int { return len(a) }
func (a typesByString) Less(i, j int) bool {
	if a[i].short != a[j].short {
		return a[i].short < a[j].short
	}
	// When the only difference between the types is whether
	// they refer to byte or uint8, such as **byte vs **uint8,
	// the types' ShortStrings can be identical.
	// To preserve deterministic sort ordering, sort these by String().
	return a[i].regular < a[j].regular
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// dalgsym returns the symbol of the algorithm table (hash/equal
// closure pair) for type t, emitting the table and closures if needed.
func dalgsym(t *types.Type) *obj.LSym {
	var lsym *obj.LSym
	var hashfunc *obj.LSym
	var eqfunc *obj.LSym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s := typeLookup(p)
		lsym = s.Linksym()
		if s.AlgGen() {
			// Shared table for this size already emitted.
			return lsym
		}
		s.SetAlgGen(true)

		if memhashvarlen == nil {
			// Lazily resolve the variable-length runtime helpers.
			memhashvarlen = sysfunc("memhash_varlen")
			memequalvarlen = sysfunc("memequal_varlen")
		}

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = typeLookup(p).Linksym()

		ot := 0
		ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
		ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = typeLookup(p).Linksym()

		ot = 0
		ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
		ot = duintptr(eqfunc, ot, uint64(t.Width))
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s := typesymprefix(".alg", t)
		lsym = s.Linksym()

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t).Linksym()
		eqfunc = typesymprefix(".eqfunc", t).Linksym()

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash.Linksym(), 0)
		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq.Linksym(), 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(lsym, ot, hashfunc, 0)
	ot = dsymptr(lsym, ot, eqfunc, 0)
	ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
	return lsym
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit
// entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program,
// which can express repetition compactly. In either form, the
// information is used by the runtime to initialize the heap bitmap,
// and for large types (like 128 or more words), they are roughly the
// same speed. GC programs are never much larger and often more
// compact. (If large arrays are involved, they can be arbitrarily
// more compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		// Few enough pointer words: use a plain pointer bitmap.
		lsym = dgcptrmask(t)
		return
	}

	// Otherwise fall back to a GC program; dgcprog may also report a
	// larger described size (see its doc comment).
	useGCProg = true
	lsym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *obj.LSym {
	// One bit per pointer-sized word, rounded up to whole bytes.
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	// Masks are content-addressed by their hex bytes, so identical
	// masks from different types share one symbol.
	sym := Runtimepkg.Lookup(p)
	lsym := sym.Linksym()
	if !sym.Uniq() {
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(lsym, i, x)
		}
		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return lsym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	// Compute the per-word pointer bits, then pack them into bytes.
	vec := bvalloc(8 * int32(len(ptrmask)))
	xoffset := int64(0)
	onebitwalktype1(t, &xoffset, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	lsym := typesymprefix(".gcprog", t).Linksym()
	var p GCProg
	p.init(lsym)
	p.emit(t, 0)
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	// Sanity-check the size described by the program against the type.
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return lsym, offset
}

// GCProg accumulates the bytes of a GC program into a data symbol.
type GCProg struct {
	lsym   *obj.LSym
	symoff int // next write offset within lsym
	w      gcprog.Writer
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into lsym, reserving the
// leading 4 bytes for the program length (filled in by end).
func (p *GCProg) init(lsym *obj.LSym) {
	p.lsym = lsym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one program byte to the symbol.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.lsym, p.symoff, x)
}

// end finishes the program, records its length in the reserved first
// 4 bytes, and declares the symbol.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.lsym, 0, uint32(p.symoff-4))
	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
	}
}

// emit writes program instructions describing the pointers in t,
// located at the given byte offset from the start of the described data.
func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		// A single pointer-sized word that holds a pointer.
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		// Only the data word (first word) of a string is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Both words of an interface are pointers.
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		// Only the data word (first word) of a slice is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit the first element, then repeat it for the remaining count-1.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map value too big %d", size)
	}
	// Record the largest zero region requested so far.
	if zerosize < size {
		zerosize = size
	}
	s := mappkg.Lookup("zero")
	if s.Def == nil {
		// Create the shared zero symbol on first use.
		x := newname(s)
		x.Type = types.Types[TUINT8]
		x.SetClass(PEXTERN)
		x.SetTypecheck(1)
		s.Def = asTypesNode(x)
	}
	z := nod(OADDR, asNode(s.Def), nil)
	z.Type = types.NewPtr(types.Types[TUINT8])
	z.SetAddable(true)
	z.SetTypecheck(1)
	return z
}