// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
	"os"
	"sort"
	"strings"
	"sync"
)

// itabEntry pairs a concrete type t with an interface type itype for
// which an itab symbol must be generated.
type itabEntry struct {
	t, itype *types.Type
	lsym     *obj.LSym // symbol of the itab itself

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in by peekitabs
	entries []*obj.LSym
}

// ptabEntry records a symbol and its type; entries accumulate in the
// package-level ptabs slice below.
type ptabEntry struct {
	s *types.Sym
	t *types.Type
}

// runtime interface and reflection data structures
var (
	signatsetmu sync.Mutex // protects signatset
	signatset   = make(map[*types.Type]struct{})

	itabs []itabEntry
	ptabs []ptabEntry
)

// Sig describes one method signature for the purpose of emitting
// runtime method metadata.
type Sig struct {
	name   string      // method name, unqualified
	pkg    *types.Pkg  // set only for unexported methods (see methods/imethods)
	isym   *types.Sym  // method symbol built from the interface-word type
	tsym   *types.Sym  // method symbol built from t itself
	type_  *types.Type // method func type including the receiver
	mtype  *types.Type // method func type without the receiver
	offset int32
}

// siglt sorts method signatures by name, then package path.
// A nil pkg (exported method) sorts before any non-nil pkg.
func siglt(a, b *Sig) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.pkg == b.pkg {
		return false
	}
	if a.pkg == nil {
		return true
	}
	if b.pkg == nil {
		return false
	}
	return a.pkg.Path < b.pkg.Path
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
const (
	BUCKETSIZE = 8   // number of entries per bucket
	MAXKEYSIZE = 128 // keys larger than this are stored indirectly (as pointers)
	MAXVALSIZE = 128 // values larger than this are stored indirectly (as pointers)
)

func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
func imethodSize() int     { return 4 + 4 }        // Sizeof(runtime.imethod{})

// uncommonSize returns the size in bytes of the runtime.uncommontype
// record for t, or 0 when t needs none (unnamed and method-less).
func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
	if t.Sym == nil && len(methods(t)) == 0 {
		return 0
	}
	return 4 + 2 + 2 + 4 + 4
}

// makefield returns a new struct field named name with type t.
// The name is looked up in the built-in (nil) package.
func makefield(name string, t *types.Type) *types.Field {
	f := types.NewField()
	f.Type = t
	f.Sym = (*types.Pkg)(nil).Lookup(name)
	return f
}

// mapbucket builds (and caches on t) the bucket struct type for map type t.
func mapbucket(t *types.Type) *types.Type {
	if t.MapType().Bucket != nil {
		return t.MapType().Bucket
	}

	bucket := types.New(TSTRUCT)
	keytype := t.Key()
	valtype := t.Val()
	dowidth(keytype)
	dowidth(valtype)
	// Large keys/values are stored indirectly, as pointers.
	if keytype.Width > MAXKEYSIZE {
		keytype = types.NewPtr(keytype)
	}
	if valtype.Width > MAXVALSIZE {
		valtype = types.NewPtr(valtype)
	}

	field := make([]*types.Field, 0, 5)

	// The first field is: uint8 topbits[BUCKETSIZE].
	arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
	field = append(field, makefield("topbits", arr))

	arr = types.NewArray(keytype, BUCKETSIZE)
	arr.SetNoalg(true)
	field = append(field, makefield("keys", arr))

	arr = types.NewArray(valtype, BUCKETSIZE)
	arr.SetNoalg(true)
	field = append(field, makefield("values", arr))

	// Make sure the overflow pointer is the last memory in the struct,
	// because the runtime assumes it can use size-ptrSize as the
	// offset of the overflow pointer. We double-check that property
	// below once the offsets and size are computed.
	//
	// BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
	// On 32-bit systems, the max alignment is 32-bit, and the
	// overflow pointer will add another 32-bit field, and the struct
	// will end with no padding.
	// On 64-bit systems, the max alignment is 64-bit, and the
	// overflow pointer will add another 64-bit field, and the struct
	// will end with no padding.
	// On nacl/amd64p32, however, the max alignment is 64-bit,
	// but the overflow pointer will add only a 32-bit field,
	// so if the struct needs 64-bit padding (because a key or value does)
	// then it would end with an extra 32-bit padding field.
	// Preempt that by emitting the padding here.
	if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr {
		field = append(field, makefield("pad", types.Types[TUINTPTR]))
	}

	// If keys and values have no pointers, the map implementation
	// can keep a list of overflow pointers on the side so that
	// buckets can be marked as having no pointers.
	// Arrange for the bucket to have no pointers by changing
	// the type of the overflow field to uintptr in this case.
	// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
	otyp := types.NewPtr(bucket)
	if !types.Haspointers(t.Val()) && !types.Haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
		otyp = types.Types[TUINTPTR]
	}
	ovf := makefield("overflow", otyp)
	field = append(field, ovf)

	// link up fields
	bucket.SetNoalg(true)
	bucket.SetLocal(t.Local())
	bucket.SetFields(field[:])
	dowidth(bucket)

	// Double-check that overflow field is final memory in struct,
	// with no padding at end. See comment above.
	if ovf.Offset != bucket.Width-int64(Widthptr) {
		Fatalf("bad math in mapbucket for %v", t)
	}

	t.MapType().Bucket = bucket

	bucket.StructType().Map = t
	return bucket
}

// Builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
183 func hmap(t *types.Type) *types.Type { 184 if t.MapType().Hmap != nil { 185 return t.MapType().Hmap 186 } 187 188 bucket := mapbucket(t) 189 fields := []*types.Field{ 190 makefield("count", types.Types[TINT]), 191 makefield("flags", types.Types[TUINT8]), 192 makefield("B", types.Types[TUINT8]), 193 makefield("noverflow", types.Types[TUINT16]), 194 makefield("hash0", types.Types[TUINT32]), 195 makefield("buckets", types.NewPtr(bucket)), 196 makefield("oldbuckets", types.NewPtr(bucket)), 197 makefield("nevacuate", types.Types[TUINTPTR]), 198 makefield("overflow", types.Types[TUNSAFEPTR]), 199 } 200 201 h := types.New(TSTRUCT) 202 h.SetNoalg(true) 203 h.SetLocal(t.Local()) 204 h.SetFields(fields) 205 dowidth(h) 206 t.MapType().Hmap = h 207 h.StructType().Map = t 208 return h 209 } 210 211 func hiter(t *types.Type) *types.Type { 212 if t.MapType().Hiter != nil { 213 return t.MapType().Hiter 214 } 215 216 // build a struct: 217 // hiter { 218 // key *Key 219 // val *Value 220 // t *MapType 221 // h *Hmap 222 // buckets *Bucket 223 // bptr *Bucket 224 // overflow0 unsafe.Pointer 225 // overflow1 unsafe.Pointer 226 // startBucket uintptr 227 // stuff uintptr 228 // bucket uintptr 229 // checkBucket uintptr 230 // } 231 // must match ../../../../runtime/hashmap.go:hiter. 
232 var field [12]*types.Field 233 field[0] = makefield("key", types.NewPtr(t.Key())) 234 field[1] = makefield("val", types.NewPtr(t.Val())) 235 field[2] = makefield("t", types.NewPtr(types.Types[TUINT8])) 236 field[3] = makefield("h", types.NewPtr(hmap(t))) 237 field[4] = makefield("buckets", types.NewPtr(mapbucket(t))) 238 field[5] = makefield("bptr", types.NewPtr(mapbucket(t))) 239 field[6] = makefield("overflow0", types.Types[TUNSAFEPTR]) 240 field[7] = makefield("overflow1", types.Types[TUNSAFEPTR]) 241 field[8] = makefield("startBucket", types.Types[TUINTPTR]) 242 field[9] = makefield("stuff", types.Types[TUINTPTR]) // offset+wrapped+B+I 243 field[10] = makefield("bucket", types.Types[TUINTPTR]) 244 field[11] = makefield("checkBucket", types.Types[TUINTPTR]) 245 246 // build iterator struct holding the above fields 247 i := types.New(TSTRUCT) 248 i.SetNoalg(true) 249 i.SetFields(field[:]) 250 dowidth(i) 251 if i.Width != int64(12*Widthptr) { 252 Fatalf("hash_iter size not correct %d %d", i.Width, 12*Widthptr) 253 } 254 t.MapType().Hiter = i 255 i.StructType().Map = t 256 return i 257 } 258 259 // f is method type, with receiver. 260 // return function type, receiver as first argument (or not). 261 func methodfunc(f *types.Type, receiver *types.Type) *types.Type { 262 var in []*Node 263 if receiver != nil { 264 d := nod(ODCLFIELD, nil, nil) 265 d.Type = receiver 266 in = append(in, d) 267 } 268 269 var d *Node 270 for _, t := range f.Params().Fields().Slice() { 271 d = nod(ODCLFIELD, nil, nil) 272 d.Type = t.Type 273 d.SetIsddd(t.Isddd()) 274 in = append(in, d) 275 } 276 277 var out []*Node 278 for _, t := range f.Results().Fields().Slice() { 279 d = nod(ODCLFIELD, nil, nil) 280 d.Type = t.Type 281 out = append(out, d) 282 } 283 284 t := functype(nil, in, out) 285 if f.Nname() != nil { 286 // Link to name of original method function. 
287 t.SetNname(f.Nname()) 288 } 289 290 return t 291 } 292 293 // methods returns the methods of the non-interface type t, sorted by name. 294 // Generates stub functions as needed. 295 func methods(t *types.Type) []*Sig { 296 // method type 297 mt := methtype(t) 298 299 if mt == nil { 300 return nil 301 } 302 expandmeth(mt) 303 304 // type stored in interface word 305 it := t 306 307 if !isdirectiface(it) { 308 it = types.NewPtr(t) 309 } 310 311 // make list of methods for t, 312 // generating code if necessary. 313 var ms []*Sig 314 for _, f := range mt.AllMethods().Slice() { 315 if f.Type.Etype != TFUNC || f.Type.Recv() == nil { 316 Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) 317 } 318 if f.Type.Recv() == nil { 319 Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) 320 } 321 if f.Nointerface() { 322 continue 323 } 324 325 method := f.Sym 326 if method == nil { 327 continue 328 } 329 330 // get receiver type for this particular method. 331 // if pointer receiver but non-pointer t and 332 // this is not an embedded pointer inside a struct, 333 // method does not apply. 
334 this := f.Type.Recv().Type 335 336 if this.IsPtr() && this.Elem() == t { 337 continue 338 } 339 if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) { 340 continue 341 } 342 343 var sig Sig 344 ms = append(ms, &sig) 345 346 sig.name = method.Name 347 if !exportname(method.Name) { 348 if method.Pkg == nil { 349 Fatalf("methods: missing package") 350 } 351 sig.pkg = method.Pkg 352 } 353 354 sig.isym = methodsym(method, it, true) 355 sig.tsym = methodsym(method, t, false) 356 sig.type_ = methodfunc(f.Type, t) 357 sig.mtype = methodfunc(f.Type, nil) 358 359 if !sig.isym.Siggen() { 360 sig.isym.SetSiggen(true) 361 if !eqtype(this, it) || this.Width < int64(Widthptr) { 362 compiling_wrappers = 1 363 genwrapper(it, f, sig.isym, 1) 364 compiling_wrappers = 0 365 } 366 } 367 368 if !sig.tsym.Siggen() { 369 sig.tsym.SetSiggen(true) 370 if !eqtype(this, t) { 371 compiling_wrappers = 1 372 genwrapper(t, f, sig.tsym, 0) 373 compiling_wrappers = 0 374 } 375 } 376 } 377 378 obj.SortSlice(ms, func(i, j int) bool { return siglt(ms[i], ms[j]) }) 379 return ms 380 } 381 382 // imethods returns the methods of the interface type t, sorted by name. 383 func imethods(t *types.Type) []*Sig { 384 var methods []*Sig 385 for _, f := range t.Fields().Slice() { 386 if f.Type.Etype != TFUNC || f.Sym == nil { 387 continue 388 } 389 method := f.Sym 390 var sig = Sig{ 391 name: method.Name, 392 } 393 if !exportname(method.Name) { 394 if method.Pkg == nil { 395 Fatalf("imethods: missing package") 396 } 397 sig.pkg = method.Pkg 398 } 399 400 sig.mtype = f.Type 401 sig.offset = 0 402 sig.type_ = methodfunc(f.Type, nil) 403 404 if n := len(methods); n > 0 { 405 last := methods[n-1] 406 if !(siglt(last, &sig)) { 407 Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name) 408 } 409 } 410 methods = append(methods, &sig) 411 412 // Compiler can only refer to wrappers for non-blank methods. 
413 if method.IsBlank() { 414 continue 415 } 416 417 // NOTE(rsc): Perhaps an oversight that 418 // IfaceType.Method is not in the reflect data. 419 // Generate the method body, so that compiled 420 // code can refer to it. 421 isym := methodsym(method, t, false) 422 if !isym.Siggen() { 423 isym.SetSiggen(true) 424 genwrapper(t, f, isym, 0) 425 } 426 } 427 428 return methods 429 } 430 431 func dimportpath(p *types.Pkg) { 432 if p.Pathsym != nil { 433 return 434 } 435 436 // If we are compiling the runtime package, there are two runtime packages around 437 // -- localpkg and Runtimepkg. We don't want to produce import path symbols for 438 // both of them, so just produce one for localpkg. 439 if myimportpath == "runtime" && p == Runtimepkg { 440 return 441 } 442 443 var str string 444 if p == localpkg { 445 // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. 446 str = myimportpath 447 } else { 448 str = p.Path 449 } 450 451 s := Ctxt.Lookup("type..importpath." + p.Prefix + ".") 452 ot := dnameData(s, 0, str, "", nil, false) 453 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 454 p.Pathsym = s 455 } 456 457 func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int { 458 if pkg == nil { 459 return duintptr(s, ot, 0) 460 } 461 462 if pkg == localpkg && myimportpath == "" { 463 // If we don't know the full import path of the package being compiled 464 // (i.e. -p was not passed on the compiler command line), emit a reference to 465 // type..importpath.""., which the linker will rewrite using the correct import path. 466 // Every package that imports this one directly defines the symbol. 467 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 468 ns := Ctxt.Lookup(`type..importpath."".`) 469 return dsymptr(s, ot, ns, 0) 470 } 471 472 dimportpath(pkg) 473 return dsymptr(s, ot, pkg.Pathsym, 0) 474 } 475 476 // dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol. 
477 func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { 478 if pkg == nil { 479 return duint32(s, ot, 0) 480 } 481 if pkg == localpkg && myimportpath == "" { 482 // If we don't know the full import path of the package being compiled 483 // (i.e. -p was not passed on the compiler command line), emit a reference to 484 // type..importpath.""., which the linker will rewrite using the correct import path. 485 // Every package that imports this one directly defines the symbol. 486 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 487 ns := Ctxt.Lookup(`type..importpath."".`) 488 return dsymptrOff(s, ot, ns, 0) 489 } 490 491 dimportpath(pkg) 492 return dsymptrOff(s, ot, pkg.Pathsym, 0) 493 } 494 495 // isExportedField reports whether a struct field is exported. 496 // It also returns the package to use for PkgPath for an unexported field. 497 func isExportedField(ft *types.Field) (bool, *types.Pkg) { 498 if ft.Sym != nil && ft.Embedded == 0 { 499 return exportname(ft.Sym.Name), ft.Sym.Pkg 500 } else { 501 if ft.Type.Sym != nil && 502 (ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) { 503 return false, ft.Type.Sym.Pkg 504 } else { 505 return true, nil 506 } 507 } 508 } 509 510 // dnameField dumps a reflect.name for a struct field. 511 func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int { 512 var name string 513 if ft.Sym != nil { 514 name = ft.Sym.Name 515 } 516 isExported, fpkg := isExportedField(ft) 517 if isExported || fpkg == spkg { 518 fpkg = nil 519 } 520 nsym := dname(name, ft.Note, fpkg, isExported) 521 return dsymptr(lsym, ot, nsym, 0) 522 } 523 524 // dnameData writes the contents of a reflect.name into s at offset ot. 525 func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int { 526 if len(name) > 1<<16-1 { 527 Fatalf("name too long: %s", name) 528 } 529 if len(tag) > 1<<16-1 { 530 Fatalf("tag too long: %s", tag) 531 } 532 533 // Encode name and tag. 
See reflect/type.go for details. 534 var bits byte 535 l := 1 + 2 + len(name) 536 if exported { 537 bits |= 1 << 0 538 } 539 if len(tag) > 0 { 540 l += 2 + len(tag) 541 bits |= 1 << 1 542 } 543 if pkg != nil { 544 bits |= 1 << 2 545 } 546 b := make([]byte, l) 547 b[0] = bits 548 b[1] = uint8(len(name) >> 8) 549 b[2] = uint8(len(name)) 550 copy(b[3:], name) 551 if len(tag) > 0 { 552 tb := b[3+len(name):] 553 tb[0] = uint8(len(tag) >> 8) 554 tb[1] = uint8(len(tag)) 555 copy(tb[2:], tag) 556 } 557 558 ot = int(s.WriteBytes(Ctxt, int64(ot), b)) 559 560 if pkg != nil { 561 ot = dgopkgpathOff(s, ot, pkg) 562 } 563 564 return ot 565 } 566 567 var dnameCount int 568 569 // dname creates a reflect.name for a struct field or method. 570 func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym { 571 // Write out data as "type.." to signal two things to the 572 // linker, first that when dynamically linking, the symbol 573 // should be moved to a relro section, and second that the 574 // contents should not be decoded as a type. 575 sname := "type..namedata." 576 if pkg == nil { 577 // In the common case, share data with other packages. 578 if name == "" { 579 if exported { 580 sname += "-noname-exported." + tag 581 } else { 582 sname += "-noname-unexported." + tag 583 } 584 } else { 585 sname += name + "." + tag 586 } 587 } else { 588 sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount) 589 dnameCount++ 590 } 591 s := Ctxt.Lookup(sname) 592 if len(s.P) > 0 { 593 return s 594 } 595 ot := dnameData(s, 0, name, tag, pkg, exported) 596 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 597 return s 598 } 599 600 // dextratype dumps the fields of a runtime.uncommontype. 601 // dataAdd is the offset in bytes after the header where the 602 // backing array of the []method field is written (by dextratypeData). 
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
	m := methods(t)
	// Unnamed, method-less types carry no uncommontype record
	// (uncommonSize(t) == 0 in that case).
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %v", t)
	}

	// Ensure each method's type descriptor exists before we reference it.
	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOff(lsym, ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	// mcount and dataAdd are stored as uint16/uint32 below; verify they fit.
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	}
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)
	}

	ot = duint16(lsym, ot, uint16(mcount))
	ot = duint16(lsym, ot, 0)
	ot = duint32(lsym, ot, uint32(dataAdd))
	ot = duint32(lsym, ot, 0)
	return ot
}

// typePkg returns the package to record for type t in its uncommontype,
// or nil for predeclared types (and the error type). For unnamed
// composite types it falls back to the element type's symbol.
func typePkg(t *types.Type) *types.Pkg {
	tsym := t.Sym
	if tsym == nil {
		switch t.Etype {
		case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := exportname(a.name)
		var pkg *types.Pkg
		if !exported && a.pkg != typePkg(t) {
			pkg = a.pkg
		}
		nsym := dname(a.name, "", pkg, exported)

		ot = dsymptrOff(lsym, ot, nsym, 0)
		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype).Linksym())
		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
	}
	return ot
}

// dmethodptrOff writes a zero uint32 at s+ot and attaches an
// R_METHODOFF relocation pointing at x, returning the new offset.
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
	duint32(s, ot, 0)
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = objabi.R_METHODOFF
	return ot + 4
}

// kinds maps compiler type etypes to the runtime kind constants
// stored in the rtype kind byte.
var kinds = []int{
	TINT:        objabi.KindInt,
	TUINT:       objabi.KindUint,
	TINT8:       objabi.KindInt8,
	TUINT8:      objabi.KindUint8,
	TINT16:      objabi.KindInt16,
	TUINT16:     objabi.KindUint16,
	TINT32:      objabi.KindInt32,
	TUINT32:     objabi.KindUint32,
	TINT64:      objabi.KindInt64,
	TUINT64:     objabi.KindUint64,
	TUINTPTR:    objabi.KindUintptr,
	TFLOAT32:    objabi.KindFloat32,
	TFLOAT64:    objabi.KindFloat64,
	TBOOL:       objabi.KindBool,
	TSTRING:     objabi.KindString,
	TPTR32:      objabi.KindPtr,
	TPTR64:      objabi.KindPtr,
	TSTRUCT:     objabi.KindStruct,
	TINTER:      objabi.KindInterface,
	TCHAN:       objabi.KindChan,
	TMAP:        objabi.KindMap,
	TARRAY:      objabi.KindArray,
	TSLICE:      objabi.KindSlice,
	TFUNC:       objabi.KindFunc,
	TCOMPLEX64:  objabi.KindComplex64,
	TCOMPLEX128: objabi.KindComplex128,
	TUNSAFEPTR:  objabi.KindUnsafePointer,
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
	if !types.Haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		// Single pointer word.
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		var lastPtrField *types.Field
		for _, t1 := range t.Fields().Slice() {
			if types.Haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
	tflagNamed     = 1 << 2
)

// Lazily-initialized symbols for the runtime's algorithm tables;
// see dcommontype and dalgsym.
var (
	algarray       *obj.LSym
	memhashvarlen  *obj.LSym
	memequalvarlen *obj.LSym
)

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int {
	// The rtype header is always written at the start of the symbol.
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if algarray == nil {
		algarray = Sysfunc("algarray")
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *obj.LSym
	if alg == ASPECIAL || alg == AMEM {
		algsym = dalgsym(t)
	}

	sptrWeak := true
	var sptr *obj.LSym
	if !t.IsPtr() || t.PtrBase != nil {
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
			sptrWeak = false
		}
		sptr = dtypesym(tptr).Linksym()
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	// type rtype struct {
	//	size       uintptr
	//	ptrdata    uintptr
	//	hash       uint32
	//	tflag      tflag
	//	align      uint8
	//	fieldAlign uint8
	//	kind       uint8
	//	alg        *typeAlg
	//	gcdata     *byte
	//	str        nameOff
	//	ptrToThis  typeOff
	// }
	ot = duintptr(lsym, ot, uint64(t.Width))
	ot = duintptr(lsym, ot, uint64(ptrdata))
	ot = duint32(lsym, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}

	exported := false
	p := t.LongString()
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = exportname(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = exportname(t.Elem().Sym.Name)
		}
	}

	ot = duint8(lsym, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(lsym, ot, t.Align) // align
	ot = duint8(lsym, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if !types.Haspointers(t) {
		i |= objabi.KindNoPointers
	}
	if isdirectiface(t) {
		i |= objabi.KindDirectIface
	}
	if useGCProg {
		i |= objabi.KindGCProg
	}
	ot = duint8(lsym, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(lsym, ot, algsym, 0)
	}
	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOff(lsym, ot, nsym, 0) // str
	// ptrToThis
	if sptr == nil {
		ot = duint32(lsym, ot, 0)
	} else if sptrWeak {
		ot = dsymptrWeakOff(lsym, ot, sptr)
	} else {
		ot = dsymptrOff(lsym, ot, sptr, 0)
	}

	return ot
}

// typesymname returns the symbol name used for type t's descriptor.
func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	if a, bad := algtype1(t); a == ANOEQ && bad.Noalg() {
		name = "noalg." + name
	}
	return name
}

// Fake package for runtime type info (headers)
// Don't access directly, use typeLookup below.
var (
	typepkgmu sync.Mutex // protects typepkg lookups
	typepkg   = types.NewPkg("type", "type")
)

// typeLookup looks name up in typepkg under the mutex.
func typeLookup(name string) *types.Sym {
	typepkgmu.Lock()
	s := typepkg.Lookup(name)
	typepkgmu.Unlock()
	return s
}

// typesym returns the symbol for type t's descriptor.
func typesym(t *types.Type) *types.Sym {
	return typeLookup(typesymname(t))
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym {
	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
}

// typesymprefix returns the symbol "prefix.<type name>" in typepkg.
func typesymprefix(prefix string, t *types.Type) *types.Sym {
	p := prefix + "." + t.ShortString()
	s := typeLookup(p)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym returns t's descriptor symbol and records t in signatset
// so its descriptor is emitted.
func typenamesym(t *types.Type) *types.Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typenamesym %v", t)
	}
	s := typesym(t)
	signatsetmu.Lock()
	addsignat(t)
	signatsetmu.Unlock()
	return s
}

// typename returns a Node representing &<t's type descriptor>.
func typename(t *types.Type) *Node {
	s := typenamesym(t)
	if s.Def == nil {
		n := newnamel(src.NoXPos, s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// itabname returns a Node representing the address of the itab for
// concrete type t and interface type itype, registering the pair in
// itabs on first use.
func itabname(t, itype *types.Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
		Fatalf("itabname(%v, %v)", t, itype)
	}
	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
	if s.Def == nil {
		n := newname(s)
		n.Type = types.Types[TUINT8]
		n.SetClass(PEXTERN)
		n.SetTypecheck(1)
		s.Def = asTypesNode(n)
		itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.SetTypecheck(1)
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
993 func isreflexive(t *types.Type) bool { 994 switch t.Etype { 995 case TBOOL, 996 TINT, 997 TUINT, 998 TINT8, 999 TUINT8, 1000 TINT16, 1001 TUINT16, 1002 TINT32, 1003 TUINT32, 1004 TINT64, 1005 TUINT64, 1006 TUINTPTR, 1007 TPTR32, 1008 TPTR64, 1009 TUNSAFEPTR, 1010 TSTRING, 1011 TCHAN: 1012 return true 1013 1014 case TFLOAT32, 1015 TFLOAT64, 1016 TCOMPLEX64, 1017 TCOMPLEX128, 1018 TINTER: 1019 return false 1020 1021 case TARRAY: 1022 return isreflexive(t.Elem()) 1023 1024 case TSTRUCT: 1025 for _, t1 := range t.Fields().Slice() { 1026 if !isreflexive(t1.Type) { 1027 return false 1028 } 1029 } 1030 return true 1031 1032 default: 1033 Fatalf("bad type for map key: %v", t) 1034 return false 1035 } 1036 } 1037 1038 // needkeyupdate reports whether map updates with t as a key 1039 // need the key to be updated. 1040 func needkeyupdate(t *types.Type) bool { 1041 switch t.Etype { 1042 case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, 1043 TINT64, TUINT64, TUINTPTR, TPTR32, TPTR64, TUNSAFEPTR, TCHAN: 1044 return false 1045 1046 case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0 1047 TINTER, 1048 TSTRING: // strings might have smaller backing stores 1049 return true 1050 1051 case TARRAY: 1052 return needkeyupdate(t.Elem()) 1053 1054 case TSTRUCT: 1055 for _, t1 := range t.Fields().Slice() { 1056 if needkeyupdate(t1.Type) { 1057 return true 1058 } 1059 } 1060 return false 1061 1062 default: 1063 Fatalf("bad type for map key: %v", t) 1064 return true 1065 } 1066 } 1067 1068 // formalType replaces byte and rune aliases with real types. 1069 // They've been separate internally to make error messages 1070 // better, but we have to merge them in the reflect tables. 
// formalType replaces the byte and rune aliases with their
// canonical types (uint8 and int32), which is the identity
// used for emitted reflect data.
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

// dtypesym returns the symbol holding the reflect type descriptor
// for t, writing the descriptor's data the first time the symbol is
// requested. The layout written here must stay in sync with the
// runtime's type structures (see the runtime/type.go references below).
func dtypesym(t *types.Type) *types.Sym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	// Siggen marks a descriptor as already emitted (or in progress);
	// each type is written at most once.
	if s.Siggen() {
		return s
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		// For *T with named T, ownership follows the named element type.
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		// Unnamed types may be emitted by many packages;
		// mark dup-ok so the linker deduplicates them.
		dupok = obj.DUPOK
	}

	if myimportpath == "runtime" && (tbase == types.Types[tbase.Etype] || tbase == types.Bytetype || tbase == types.Runetype || tbase == types.Errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local() {
		return s
	}
	if isforw[tbase.Etype] {
		return s
	}

ok:
	// ot is the running byte offset of data written into lsym.
	ot := 0
	lsym := s.Linksym()
	switch t.Etype {
	default:
		ot = dcommontype(lsym, ot, t)
		ot = dextratype(lsym, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dsymptr(lsym, ot, s2.Linksym(), 0)
		ot = duintptr(lsym, ot, uint64(t.NumElem()))
		ot = dextratype(lsym, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dextratype(lsym, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = duintptr(lsym, ot, uint64(t.ChanDir()))
		ot = dextratype(lsym, ot, t, 0)

	case TFUNC:
		// Emit descriptors for all receiver, parameter, and result types.
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.Isddd()
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(lsym, ot, t)
		inCount := t.Recvs().NumFields() + t.Params().NumFields()
		outCount := t.Results().NumFields()
		if isddd {
			// High bit of outCount flags a variadic function.
			outCount |= 1 << 15
		}
		ot = duint16(lsym, ot, uint16(inCount))
		ot = duint16(lsym, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		dataAdd := (inCount + t.Results().NumFields()) * Widthptr
		ot = dextratype(lsym, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(lsym, ot, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(lsym, ot, tpkg)

		// The method slice points just past the uncommon section of this
		// same symbol; its length and capacity are both n.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))
		dataAdd := imethodSize() * n
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := exportname(a.name)
			var pkg *types.Pkg
			if !exported && a.pkg != tpkg {
				pkg = a.pkg
			}
			nsym := dname(a.name, "", pkg, exported)

			ot = dsymptrOff(lsym, ot, nsym, 0)
			ot = dsymptrOff(lsym, ot, dtypesym(a.type_).Linksym(), 0)
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Val())
		s3 := dtypesym(mapbucket(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dsymptr(lsym, ot, s2.Linksym(), 0)
		ot = dsymptr(lsym, ot, s3.Linksym(), 0)
		ot = dsymptr(lsym, ot, s4.Linksym(), 0)
		// Keys and values larger than the MAX*SIZE thresholds are stored
		// indirectly (as pointers) in the bucket; record the in-bucket
		// size and the indirection flag for the runtime.
		if t.Key().Width > MAXKEYSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Key().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		if t.Val().Width > MAXVALSIZE {
			ot = duint8(lsym, ot, uint8(Widthptr))
			ot = duint8(lsym, ot, 1) // indirect
		} else {
			ot = duint8(lsym, ot, uint8(t.Val().Width))
			ot = duint8(lsym, ot, 0) // not indirect
		}

		ot = duint16(lsym, ot, uint16(mapbucket(t).Width))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
		ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
		ot = dextratype(lsym, ot, t, 0)

	case TPTR32, TPTR64:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, ot, t)
			ot = dextratype(lsym, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(lsym, ot, t)
		ot = dsymptr(lsym, ot, s1.Linksym(), 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for _, t1 := range t.Fields().Slice() {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(lsym, ot, t)
		pkg := localpkg
		if t.Sym != nil {
			pkg = t.Sym.Pkg
		} else {
			// Unnamed type. Grab the package from the first field, if any.
			for _, f := range t.Fields().Slice() {
				if f.Embedded != 0 {
					continue
				}
				pkg = f.Sym.Pkg
				break
			}
		}
		ot = dgopkgpath(lsym, ot, pkg)
		// Field slice points just past the uncommon section of this symbol.
		ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
		ot = duintptr(lsym, ot, uint64(n))
		ot = duintptr(lsym, ot, uint64(n))

		dataAdd := n * structfieldSize()
		ot = dextratype(lsym, ot, t, dataAdd)

		for _, f := range t.Fields().Slice() {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(lsym, ot, pkg, f)
			ot = dsymptr(lsym, ot, dtypesym(f.Type).Linksym(), 0)
			// offsetAnon packs the field byte offset with the
			// embedded (anonymous) flag in the low bit.
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(lsym, ot, offsetAnon)
		}
	}

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	lsym.Set(obj.AttrMakeTypelink, keep)

	return s
}

// peekitabs gathers, for each itabEntry, the methods on
// the concrete type that implement the interface.
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.entries = methods
	}
}

// genfun returns, for the given concrete type t and interface
// type it, the (sorted) set of method symbols on the concrete
// type that implement the interface.
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	// Linear scan for the itabEntry whose itab symbol is it.
	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	methodnum := int((offset - 3*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}

// addsignat queues type t to have its descriptor emitted by dumpsignats.
func addsignat(t *types.Type) {
	signatset[t] = struct{}{}
}

// addsignats queues the types of all OTYPE declarations in dcls.
func addsignats(dcls []*Node) {
	// copy types from dcl list to signatset
	for _, n := range dcls {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}
}

// dumpsignats emits the descriptor for every type in signatset,
// including types added as a side effect of emitting others.
func dumpsignats() {
	// Process signatset. Use a loop, as dtypesym adds
	// entries to signatset while it is being processed.
	signats := make([]typeAndStr, len(signatset))
	for len(signatset) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for t := range signatset {
			signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
			delete(signatset, t)
		}
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			if t.Sym != nil {
				// Named types also get a *T descriptor.
				dtypesym(types.NewPtr(t))
			}
		}
	}
}

// dumptabs emits the accumulated itab and plugin-ptab symbols.
func dumptabs() {
	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   link   *itab
		//   hash   uint32
		//   bad    bool
		//   inhash bool
		//   unused [2]byte
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.lsym, 0, dtypesym(i.itype).Linksym(), 0)
		o = dsymptr(i.lsym, o, dtypesym(i.t).Linksym(), 0)
		o += Widthptr                          // skip link field
		o = duint32(i.lsym, o, typehash(i.t))  // copy of type hash
		o += 4                                 // skip bad/inhash/unused fields
		o += len(imethods(i.itype)) * Widthptr // skip fun method pointers
		// at runtime the itab will contain pointers to types, other itabs and
		// method functions. None are allocated on heap, so we can use obj.NOPTR.
		ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.NOPTR))

		// Emit a go.itablink.* pointer so the linker can collect all itabs.
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
		dsymptr(ilink, 0, i.lsym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs")
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//   name nameOff
			//   typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOff(s, ot, nsym, 0)
			ot = dsymptrOff(s, ot, dtypesym(p.t).Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports")
		for _, p := range ptabs {
			ot = dsymptr(s, ot, p.s.Linksym(), 0)
		}
		ggloblsym(s, int32(ot), int16(obj.RODATA))
	}
}

// dumpimportstrings emits path strings for all imported packages.
func dumpimportstrings() {
	// generate import strings for imported packages
	for _, p := range types.ImportedPkgList() {
		dimportpath(p)
	}
}

// dumpbasictypes emits descriptors for predeclared types when
// compiling package runtime, plus import paths the linker needs.
func dumpbasictypes() {
	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in object files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(types.NewPkg("main", ""))
	}
}

// typeAndStr pairs a type with its short and regular string forms,
// precomputed once for sorting.
type typeAndStr struct {
	t       *types.Type
	short   string
	regular string
}

// typesByString sorts types by ShortString, breaking ties with String.
type typesByString []typeAndStr

func (a typesByString) Len() int { return len(a) }
func (a typesByString) Less(i, j int) bool {
	if a[i].short != a[j].short {
		return a[i].short < a[j].short
	}
	// When the only difference between the types is whether
	// they refer to byte or uint8, such as **byte vs **uint8,
	// the types' ShortStrings can be identical.
	// To preserve deterministic sort ordering, sort these by String().
	return a[i].regular < a[j].regular
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// dalgsym returns the symbol of the algorithm table (hash/equal
// function closures) for type t, emitting it on first use.
func dalgsym(t *types.Type) *obj.LSym {
	var lsym *obj.LSym
	var hashfunc *obj.LSym
	var eqfunc *obj.LSym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s := typeLookup(p)
		lsym = s.Linksym()
		if s.AlgGen() {
			return lsym
		}
		s.SetAlgGen(true)

		if memhashvarlen == nil {
			memhashvarlen = Sysfunc("memhash_varlen")
			memequalvarlen = Sysfunc("memequal_varlen")
		}

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = typeLookup(p).Linksym()

		ot := 0
		ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
		ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = typeLookup(p).Linksym()

		ot = 0
		ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
		ot = duintptr(eqfunc, ot, uint64(t.Width))
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s := typesymprefix(".alg", t)
		lsym = s.Linksym()

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t).Linksym()
		eqfunc = typesymprefix(".eqfunc", t).Linksym()

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash.Linksym(), 0)
		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq.Linksym(), 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(lsym, ot, hashfunc, 0)
	ot = dsymptr(lsym, ot, eqfunc, 0)
	ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
	return lsym
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit
entries describing where pointers are in a given type. 1639 // Above this length, the GC information is recorded as a GC program, 1640 // which can express repetition compactly. In either form, the 1641 // information is used by the runtime to initialize the heap bitmap, 1642 // and for large types (like 128 or more words), they are roughly the 1643 // same speed. GC programs are never much larger and often more 1644 // compact. (If large arrays are involved, they can be arbitrarily 1645 // more compact.) 1646 // 1647 // The cutoff must be large enough that any allocation large enough to 1648 // use a GC program is large enough that it does not share heap bitmap 1649 // bytes with any other objects, allowing the GC program execution to 1650 // assume an aligned start and not use atomic operations. In the current 1651 // runtime, this means all malloc size classes larger than the cutoff must 1652 // be multiples of four words. On 32-bit systems that's 16 bytes, and 1653 // all size classes >= 16 bytes are 16-byte aligned, so no real constraint. 1654 // On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed 1655 // for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated 1656 // is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes 1657 // must be >= 4. 1658 // 1659 // We used to use 16 because the GC programs do have some constant overhead 1660 // to get started, and processing 128 pointers seems to be enough to 1661 // amortize that overhead well. 1662 // 1663 // To make sure that the runtime's chansend can call typeBitsBulkBarrier, 1664 // we raised the limit to 2048, so that even 32-bit systems are guaranteed to 1665 // use bitmaps for objects up to 64 kB in size. 1666 // 1667 // Also known to reflect/type.go. 
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	// One bit per pointer word: use a plain pointer mask while it fits
	// in maxPtrmaskBytes, otherwise fall back to a GC program.
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		lsym = dgcptrmask(t)
		return
	}

	useGCProg = true
	lsym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *obj.LSym {
	// One bit per pointer word, rounded up to whole bytes.
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	// Masks are named by their contents, so identical bitmaps
	// share one symbol; Uniq guards against emitting data twice.
	sym := Runtimepkg.Lookup(p)
	lsym := sym.Linksym()
	if !sym.Uniq() {
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(lsym, i, x)
		}
		ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return lsym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	// Walk the type once into a bit vector, then transcribe it.
	vec := bvalloc(8 * int32(len(ptrmask)))
	xoffset := int64(0)
	onebitwalktype1(t, &xoffset, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*obj.LSym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	lsym := typesymprefix(".gcprog", t).Linksym()
	var p GCProg
	p.init(lsym)
	p.emit(t, 0)
	// Capture the described size before end() finalizes the program.
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return lsym, offset
}

// GCProg accumulates the bytes of a GC program into a data symbol.
type GCProg struct {
	lsym   *obj.LSym
	symoff int // next write offset in lsym's data
	w      gcprog.Writer
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into lsym, reserving the
// first 4 bytes for the program length (backpatched in end).
func (p *GCProg) init(lsym *obj.LSym) {
	p.lsym = lsym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one program byte to the symbol's data.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.lsym, p.symoff, x)
}

// end terminates the program, backpatches the length word at
// offset 0, and declares the finished symbol.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.lsym, 0, uint32(p.symoff-4))
	ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
	}
}

// emit appends program instructions describing the pointers of t,
// located at the given byte offset within the described object.
func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		// A pointer-sized type with pointers is exactly one pointer word.
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		// Only the data pointer (first word) is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Both words of an interface are pointers.
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		// Only the data pointer (first word) is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit one element, then repeat it count-1 times.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map value too big %d", size)
	}
	// Track the largest requested size in the package-level zerosize;
	// the backing symbol is sized from it when finally emitted.
	if zerosize < size {
		zerosize = size
	}
	s := mappkg.Lookup("zero")
	if s.Def == nil {
		// First use: declare the backing symbol as an external uint8.
		x := newname(s)
		x.Type = types.Types[TUINT8]
		x.SetClass(PEXTERN)
		x.SetTypecheck(1)
		s.Def = asTypesNode(x)
	}
	z := nod(OADDR, asNode(s.Def), nil)
	z.Type = types.NewPtr(types.Types[TUINT8])
	z.SetAddable(true)
	z.SetTypecheck(1)
	return z
}