// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"cmd/internal/src"
	"fmt"
	"os"
	"sort"
	"strings"
)

// itabEntry records an itab that must be emitted for the concrete
// type t implementing interface type itype.
type itabEntry struct {
	t, itype *types.Type
	sym      *types.Sym

	// symbol of the itab itself;
	// filled in lazily after typecheck
	lsym *obj.LSym

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in at the same time as lsym
	entries []*obj.LSym
}

// ptabEntry pairs a symbol s with its type t for the ptab.
type ptabEntry struct {
	s *types.Sym
	t *types.Type
}

// runtime interface and reflection data structures
var signatlist = make(map[*types.Type]bool) // types whose descriptors are queued for emission (see addsignat)
var itabs []itabEntry                       // itabs queued for emission
var ptabs []ptabEntry                       // ptab entries queued for emission

// Sig describes one method for the runtime method tables: its name,
// its package (set only for unexported names), the method symbols for
// the interface-word receiver (isym) and the plain receiver (tsym),
// and the function types with (type_) and without (mtype) the
// receiver as first argument. offset is filled in by callers.
type Sig struct {
	name   string
	pkg    *types.Pkg
	isym   *types.Sym
	tsym   *types.Sym
	type_  *types.Type
	mtype  *types.Type
	offset int32
}

// siglt sorts method signatures by name, then package path.
// A nil package (exported name) sorts before any non-nil package.
func siglt(a, b *Sig) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.pkg == b.pkg {
		return false
	}
	if a.pkg == nil {
		return true
	}
	if b.pkg == nil {
		return false
	}
	return a.pkg.Path < b.pkg.Path
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
const (
	BUCKETSIZE = 8   // number of key/value entries per bucket
	MAXKEYSIZE = 128 // keys wider than this are stored in the bucket as pointers
	MAXVALSIZE = 128 // values wider than this are stored in the bucket as pointers
)

func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
func imethodSize() int     { return 4 + 4 }        // Sizeof(runtime.imethod{})

// uncommonSize returns the size in bytes of the uncommontype header
// for t, or 0 if t needs none (unnamed and methodless).
func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
	if t.Sym == nil && len(methods(t)) == 0 {
		return 0
	}
	return 4 + 2 + 2 + 4 + 4
}

// makefield returns a new struct field named name of type t.
// The name is looked up in the no-package scope, so these fields
// are not visible to user code.
func makefield(name string, t *types.Type) *types.Field {
	f := types.NewField()
	f.Type = t
	f.Sym = types.Nopkg.Lookup(name)
	return f
}

func mapbucket(t *types.Type) *types.Type {
	// Result is cached on the map type.
	if t.MapType().Bucket != nil {
		return t.MapType().Bucket
	}

	bucket := types.New(TSTRUCT)
	keytype := t.Key()
	valtype := t.Val()
	dowidth(keytype)
	dowidth(valtype)
	// Large keys and values are stored indirectly, as pointers.
	if keytype.Width > MAXKEYSIZE {
		keytype = types.NewPtr(keytype)
	}
	if valtype.Width > MAXVALSIZE {
		valtype = types.NewPtr(valtype)
	}

	field := make([]*types.Field, 0, 5)

	// The first field is: uint8 topbits[BUCKETSIZE].
	arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
	field = append(field, makefield("topbits", arr))

	arr = types.NewArray(keytype, BUCKETSIZE)
	arr.SetNoalg(true)
	field = append(field, makefield("keys", arr))

	arr = types.NewArray(valtype, BUCKETSIZE)
	arr.SetNoalg(true)
	field = append(field, makefield("values", arr))

	// Make sure the overflow pointer is the last memory in the struct,
	// because the runtime assumes it can use size-ptrSize as the
	// offset of the overflow pointer. We double-check that property
	// below once the offsets and size are computed.
	//
	// BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
	// On 32-bit systems, the max alignment is 32-bit, and the
	// overflow pointer will add another 32-bit field, and the struct
	// will end with no padding.
	// On 64-bit systems, the max alignment is 64-bit, and the
	// overflow pointer will add another 64-bit field, and the struct
	// will end with no padding.
	// On nacl/amd64p32, however, the max alignment is 64-bit,
	// but the overflow pointer will add only a 32-bit field,
	// so if the struct needs 64-bit padding (because a key or value does)
	// then it would end with an extra 32-bit padding field.
	// Preempt that by emitting the padding here.
	if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr {
		field = append(field, makefield("pad", types.Types[TUINTPTR]))
	}

	// If keys and values have no pointers, the map implementation
	// can keep a list of overflow pointers on the side so that
	// buckets can be marked as having no pointers.
	// Arrange for the bucket to have no pointers by changing
	// the type of the overflow field to uintptr in this case.
	// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
	otyp := types.NewPtr(bucket)
	if !types.Haspointers(t.Val()) && !types.Haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
		otyp = types.Types[TUINTPTR]
	}
	ovf := makefield("overflow", otyp)
	field = append(field, ovf)

	// link up fields
	bucket.SetNoalg(true)
	bucket.SetLocal(t.Local())
	bucket.SetFields(field[:])
	dowidth(bucket)

	// Double-check that overflow field is final memory in struct,
	// with no padding at end. See comment above.
	if ovf.Offset != bucket.Width-int64(Widthptr) {
		Fatalf("bad math in mapbucket for %v", t)
	}

	t.MapType().Bucket = bucket

	bucket.StructType().Map = t
	return bucket
}

// Builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
181 func hmap(t *types.Type) *types.Type { 182 if t.MapType().Hmap != nil { 183 return t.MapType().Hmap 184 } 185 186 bucket := mapbucket(t) 187 fields := []*types.Field{ 188 makefield("count", types.Types[TINT]), 189 makefield("flags", types.Types[TUINT8]), 190 makefield("B", types.Types[TUINT8]), 191 makefield("noverflow", types.Types[TUINT16]), 192 makefield("hash0", types.Types[TUINT32]), 193 makefield("buckets", types.NewPtr(bucket)), 194 makefield("oldbuckets", types.NewPtr(bucket)), 195 makefield("nevacuate", types.Types[TUINTPTR]), 196 makefield("overflow", types.Types[TUNSAFEPTR]), 197 } 198 199 h := types.New(TSTRUCT) 200 h.SetNoalg(true) 201 h.SetLocal(t.Local()) 202 h.SetFields(fields) 203 dowidth(h) 204 t.MapType().Hmap = h 205 h.StructType().Map = t 206 return h 207 } 208 209 func hiter(t *types.Type) *types.Type { 210 if t.MapType().Hiter != nil { 211 return t.MapType().Hiter 212 } 213 214 // build a struct: 215 // hiter { 216 // key *Key 217 // val *Value 218 // t *MapType 219 // h *Hmap 220 // buckets *Bucket 221 // bptr *Bucket 222 // overflow0 unsafe.Pointer 223 // overflow1 unsafe.Pointer 224 // startBucket uintptr 225 // stuff uintptr 226 // bucket uintptr 227 // checkBucket uintptr 228 // } 229 // must match ../../../../runtime/hashmap.go:hiter. 
230 var field [12]*types.Field 231 field[0] = makefield("key", types.NewPtr(t.Key())) 232 field[1] = makefield("val", types.NewPtr(t.Val())) 233 field[2] = makefield("t", types.NewPtr(types.Types[TUINT8])) 234 field[3] = makefield("h", types.NewPtr(hmap(t))) 235 field[4] = makefield("buckets", types.NewPtr(mapbucket(t))) 236 field[5] = makefield("bptr", types.NewPtr(mapbucket(t))) 237 field[6] = makefield("overflow0", types.Types[TUNSAFEPTR]) 238 field[7] = makefield("overflow1", types.Types[TUNSAFEPTR]) 239 field[8] = makefield("startBucket", types.Types[TUINTPTR]) 240 field[9] = makefield("stuff", types.Types[TUINTPTR]) // offset+wrapped+B+I 241 field[10] = makefield("bucket", types.Types[TUINTPTR]) 242 field[11] = makefield("checkBucket", types.Types[TUINTPTR]) 243 244 // build iterator struct holding the above fields 245 i := types.New(TSTRUCT) 246 i.SetNoalg(true) 247 i.SetFields(field[:]) 248 dowidth(i) 249 if i.Width != int64(12*Widthptr) { 250 Fatalf("hash_iter size not correct %d %d", i.Width, 12*Widthptr) 251 } 252 t.MapType().Hiter = i 253 i.StructType().Map = t 254 return i 255 } 256 257 // f is method type, with receiver. 258 // return function type, receiver as first argument (or not). 259 func methodfunc(f *types.Type, receiver *types.Type) *types.Type { 260 var in []*Node 261 if receiver != nil { 262 d := nod(ODCLFIELD, nil, nil) 263 d.Type = receiver 264 in = append(in, d) 265 } 266 267 var d *Node 268 for _, t := range f.Params().Fields().Slice() { 269 d = nod(ODCLFIELD, nil, nil) 270 d.Type = t.Type 271 d.SetIsddd(t.Isddd()) 272 in = append(in, d) 273 } 274 275 var out []*Node 276 for _, t := range f.Results().Fields().Slice() { 277 d = nod(ODCLFIELD, nil, nil) 278 d.Type = t.Type 279 out = append(out, d) 280 } 281 282 t := functype(nil, in, out) 283 if f.Nname() != nil { 284 // Link to name of original method function. 
285 t.SetNname(f.Nname()) 286 } 287 288 return t 289 } 290 291 // methods returns the methods of the non-interface type t, sorted by name. 292 // Generates stub functions as needed. 293 func methods(t *types.Type) []*Sig { 294 // method type 295 mt := methtype(t) 296 297 if mt == nil { 298 return nil 299 } 300 expandmeth(mt) 301 302 // type stored in interface word 303 it := t 304 305 if !isdirectiface(it) { 306 it = types.NewPtr(t) 307 } 308 309 // make list of methods for t, 310 // generating code if necessary. 311 var ms []*Sig 312 for _, f := range mt.AllMethods().Slice() { 313 if f.Type.Etype != TFUNC || f.Type.Recv() == nil { 314 Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) 315 } 316 if f.Type.Recv() == nil { 317 Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) 318 } 319 if f.Nointerface() { 320 continue 321 } 322 323 method := f.Sym 324 if method == nil { 325 continue 326 } 327 328 // get receiver type for this particular method. 329 // if pointer receiver but non-pointer t and 330 // this is not an embedded pointer inside a struct, 331 // method does not apply. 
332 this := f.Type.Recv().Type 333 334 if this.IsPtr() && this.Elem() == t { 335 continue 336 } 337 if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) { 338 continue 339 } 340 341 var sig Sig 342 ms = append(ms, &sig) 343 344 sig.name = method.Name 345 if !exportname(method.Name) { 346 if method.Pkg == nil { 347 Fatalf("methods: missing package") 348 } 349 sig.pkg = method.Pkg 350 } 351 352 sig.isym = methodsym(method, it, true) 353 sig.tsym = methodsym(method, t, false) 354 sig.type_ = methodfunc(f.Type, t) 355 sig.mtype = methodfunc(f.Type, nil) 356 357 if !sig.isym.Siggen() { 358 sig.isym.SetSiggen(true) 359 if !eqtype(this, it) || this.Width < int64(Widthptr) { 360 compiling_wrappers = 1 361 genwrapper(it, f, sig.isym, 1) 362 compiling_wrappers = 0 363 } 364 } 365 366 if !sig.tsym.Siggen() { 367 sig.tsym.SetSiggen(true) 368 if !eqtype(this, t) { 369 compiling_wrappers = 1 370 genwrapper(t, f, sig.tsym, 0) 371 compiling_wrappers = 0 372 } 373 } 374 } 375 376 obj.SortSlice(ms, func(i, j int) bool { return siglt(ms[i], ms[j]) }) 377 return ms 378 } 379 380 // imethods returns the methods of the interface type t, sorted by name. 381 func imethods(t *types.Type) []*Sig { 382 var methods []*Sig 383 for _, f := range t.Fields().Slice() { 384 if f.Type.Etype != TFUNC || f.Sym == nil { 385 continue 386 } 387 method := f.Sym 388 var sig = Sig{ 389 name: method.Name, 390 } 391 if !exportname(method.Name) { 392 if method.Pkg == nil { 393 Fatalf("imethods: missing package") 394 } 395 sig.pkg = method.Pkg 396 } 397 398 sig.mtype = f.Type 399 sig.offset = 0 400 sig.type_ = methodfunc(f.Type, nil) 401 402 if n := len(methods); n > 0 { 403 last := methods[n-1] 404 if !(siglt(last, &sig)) { 405 Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name) 406 } 407 } 408 methods = append(methods, &sig) 409 410 // Compiler can only refer to wrappers for non-blank methods. 
411 if isblanksym(method) { 412 continue 413 } 414 415 // NOTE(rsc): Perhaps an oversight that 416 // IfaceType.Method is not in the reflect data. 417 // Generate the method body, so that compiled 418 // code can refer to it. 419 isym := methodsym(method, t, false) 420 if !isym.Siggen() { 421 isym.SetSiggen(true) 422 genwrapper(t, f, isym, 0) 423 } 424 } 425 426 return methods 427 } 428 429 func dimportpath(p *types.Pkg) { 430 if p.Pathsym != nil { 431 return 432 } 433 434 // If we are compiling the runtime package, there are two runtime packages around 435 // -- localpkg and Runtimepkg. We don't want to produce import path symbols for 436 // both of them, so just produce one for localpkg. 437 if myimportpath == "runtime" && p == Runtimepkg { 438 return 439 } 440 441 var str string 442 if p == localpkg { 443 // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. 444 str = myimportpath 445 } else { 446 str = p.Path 447 } 448 449 s := Ctxt.Lookup("type..importpath."+p.Prefix+".", 0) 450 ot := dnameData(s, 0, str, "", nil, false) 451 ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA) 452 p.Pathsym = s 453 } 454 455 func dgopkgpath(s *types.Sym, ot int, pkg *types.Pkg) int { 456 return dgopkgpathLSym(Linksym(s), ot, pkg) 457 } 458 459 func dgopkgpathLSym(s *obj.LSym, ot int, pkg *types.Pkg) int { 460 if pkg == nil { 461 return duintxxLSym(s, ot, 0, Widthptr) 462 } 463 464 if pkg == localpkg && myimportpath == "" { 465 // If we don't know the full import path of the package being compiled 466 // (i.e. -p was not passed on the compiler command line), emit a reference to 467 // type..importpath.""., which the linker will rewrite using the correct import path. 468 // Every package that imports this one directly defines the symbol. 469 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 
470 ns := Ctxt.Lookup(`type..importpath."".`, 0) 471 return dsymptrLSym(s, ot, ns, 0) 472 } 473 474 dimportpath(pkg) 475 return dsymptrLSym(s, ot, pkg.Pathsym, 0) 476 } 477 478 // dgopkgpathOffLSym writes an offset relocation in s at offset ot to the pkg path symbol. 479 func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *types.Pkg) int { 480 if pkg == nil { 481 return duintxxLSym(s, ot, 0, 4) 482 } 483 if pkg == localpkg && myimportpath == "" { 484 // If we don't know the full import path of the package being compiled 485 // (i.e. -p was not passed on the compiler command line), emit a reference to 486 // type..importpath.""., which the linker will rewrite using the correct import path. 487 // Every package that imports this one directly defines the symbol. 488 // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. 489 ns := Ctxt.Lookup(`type..importpath."".`, 0) 490 return dsymptrOffLSym(s, ot, ns, 0) 491 } 492 493 dimportpath(pkg) 494 return dsymptrOffLSym(s, ot, pkg.Pathsym, 0) 495 } 496 497 // isExportedField reports whether a struct field is exported. 498 // It also returns the package to use for PkgPath for an unexported field. 499 func isExportedField(ft *types.Field) (bool, *types.Pkg) { 500 if ft.Sym != nil && ft.Embedded == 0 { 501 return exportname(ft.Sym.Name), ft.Sym.Pkg 502 } else { 503 if ft.Type.Sym != nil && 504 (ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) { 505 return false, ft.Type.Sym.Pkg 506 } else { 507 return true, nil 508 } 509 } 510 } 511 512 // dnameField dumps a reflect.name for a struct field. 
// dnameField dumps a reflect.name for struct field ft into s at offset
// ot and returns the new offset. spkg is the package of the enclosing
// struct; an unexported field in the same package needs no PkgPath.
func dnameField(s *types.Sym, ot int, spkg *types.Pkg, ft *types.Field) int {
	var name string
	if ft.Sym != nil {
		name = ft.Sym.Name
	}
	isExported, fpkg := isExportedField(ft)
	if isExported || fpkg == spkg {
		fpkg = nil
	}
	nsym := dname(name, ft.Note, fpkg, isExported)
	return dsymptrLSym(Linksym(s), ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
	// Name and tag lengths are stored in 16 bits each.
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	// Layout: 1 flag byte, 2-byte big-endian name length, name bytes,
	// then (if bit 1 set) 2-byte tag length and tag bytes.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	// Bit 2 signals a trailing 4-byte package-path offset.
	if pkg != nil {
		ot = dgopkgpathOffLSym(s, ot, pkg)
	}

	return ot
}

// dnameCount disambiguates the symbol names of package-qualified
// reflect.name data, which cannot be shared across packages.
var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			sname += name + "." + tag
		}
	} else {
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := Ctxt.Lookup(sname, 0)
	// Already written (shared with an earlier dname call)?
	if len(s.P) > 0 {
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(s *types.Sym, ot int, t *types.Type, dataAdd int) int {
	m := methods(t)
	// Unnamed, methodless types carry no uncommontype at all.
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %v", t)
	}

	// Ensure descriptors exist for all method types before
	// the method table references them.
	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOffLSym(Linksym(s), ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	// mcount and dataAdd are stored as uint16 and uint32 respectively;
	// check they fit.
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	}
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)
	}

	ot = duint16(s, ot, uint16(mcount))
	ot = duint16(s, ot, 0)
	ot = duint32(s, ot, uint32(dataAdd))
	ot = duint32(s, ot, 0)
	return ot
}

// typePkg returns the package a named type (or pointer/container of a
// named type) belongs to, or nil for predeclared and unnamed types.
func typePkg(t *types.Type) *types.Pkg {
	tsym := t.Sym
	if tsym == nil {
		switch t.Etype {
		case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
// dextratypeData dumps one runtime.method record per method of t into
// s at offset ot and returns the new offset.
func dextratypeData(s *types.Sym, ot int, t *types.Type) int {
	lsym := Linksym(s)
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := exportname(a.name)
		var pkg *types.Pkg
		// Unexported methods from a foreign package carry their
		// package in the name data.
		if !exported && a.pkg != typePkg(t) {
			pkg = a.pkg
		}
		nsym := dname(a.name, "", pkg, exported)

		// Four 4-byte fields: name, mtyp, ifn, tfn.
		ot = dsymptrOffLSym(lsym, ot, nsym, 0)
		ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype)))
		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym))
		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym))
	}
	return ot
}

// dmethodptrOffLSym writes a 4-byte R_METHODOFF relocation to x in s
// at offset ot, so the linker can dead-code-eliminate unused methods.
func dmethodptrOffLSym(s *obj.LSym, ot int, x *obj.LSym) int {
	// Zero placeholder; the relocation fills in the real offset.
	duintxxLSym(s, ot, 0, 4)
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = obj.R_METHODOFF
	return ot + 4
}

// kinds maps compiler type etypes to the runtime reflect.Kind codes.
var kinds = []int{
	TINT:        obj.KindInt,
	TUINT:       obj.KindUint,
	TINT8:       obj.KindInt8,
	TUINT8:      obj.KindUint8,
	TINT16:      obj.KindInt16,
	TUINT16:     obj.KindUint16,
	TINT32:      obj.KindInt32,
	TUINT32:     obj.KindUint32,
	TINT64:      obj.KindInt64,
	TUINT64:     obj.KindUint64,
	TUINTPTR:    obj.KindUintptr,
	TFLOAT32:    obj.KindFloat32,
	TFLOAT64:    obj.KindFloat64,
	TBOOL:       obj.KindBool,
	TSTRING:     obj.KindString,
	TPTR32:      obj.KindPtr,
	TPTR64:      obj.KindPtr,
	TSTRUCT:     obj.KindStruct,
	TINTER:      obj.KindInterface,
	TCHAN:       obj.KindChan,
	TMAP:        obj.KindMap,
	TARRAY:      obj.KindArray,
	TSLICE:      obj.KindSlice,
	TFUNC:       obj.KindFunc,
	TCOMPLEX64:  obj.KindComplex64,
	TCOMPLEX128: obj.KindComplex128,
	TUNSAFEPTR:  obj.KindUnsafePointer,
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
	if !types.Haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		// Represented as a single pointer word.
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		// lastPtrField is non-nil here: Haspointers(t) above implies
		// at least one field has pointers.
		var lastPtrField *types.Field
		for _, t1 := range t.Fields().Slice() {
			if types.Haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
	tflagNamed     = 1 << 2
)

// dcommontype_algarray caches the symbol for runtime.algarray,
// the table of built-in hash/equal algorithms.
var dcommontype_algarray *types.Sym

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(s *types.Sym, ot int, t *types.Type) int {
	// The rtype header is always written at the start of the symbol.
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr // Sizeof(runtime.typeAlg{}) — hash and equal function pointers
	if dcommontype_algarray == nil {
		dcommontype_algarray = Runtimepkg.Lookup("algarray")
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *types.Sym
	if alg == ASPECIAL || alg == AMEM {
		algsym = dalgsym(t)
	}

	// Emit the *T descriptor too (unless t is already an unnamed
	// pointer). It is weak unless *T is named or has methods, so the
	// linker can drop it when unused.
	sptrWeak := true
	var sptr *types.Sym
	if !t.IsPtr() || t.PtrBase != nil {
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
			sptrWeak = false
		}
		sptr = dtypesym(tptr)
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	//	type rtype struct {
	//		size          uintptr
	//		ptrdata       uintptr
	//		hash          uint32
	//		tflag         tflag
	//		align         uint8
	//		fieldAlign    uint8
	//		kind          uint8
	//		alg           *typeAlg
	//		gcdata        *byte
	//		str           nameOff
	//		ptrToThis     typeOff
	//	}
	ot = duintptr(s, ot, uint64(t.Width))
	ot = duintptr(s, ot, uint64(ptrdata))

	ot = duint32(s, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}

	exported := false
	p := t.LongString()
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = exportname(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = exportname(t.Elem().Sym.Name)
		}
	}

	ot = duint8(s, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(s, ot, t.Align) // align
	ot = duint8(s, ot, t.Align) // fieldAlign

	// kind byte: reflect.Kind plus the NoPointers/DirectIface/GCProg flag bits.
	i = kinds[t.Etype]
	if !types.Haspointers(t) {
		i |= obj.KindNoPointers
	}
	if isdirectiface(t) {
		i |= obj.KindDirectIface
	}
	if useGCProg {
		i |= obj.KindGCProg
	}
	ot = duint8(s, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(s, ot, dcommontype_algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(s, ot, algsym, 0)
	}
	ot = dsymptr(s, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOffLSym(Linksym(s), ot, nsym, 0) // str
	// ptrToThis
	if sptr == nil {
		ot = duint32(s, ot, 0)
	} else if sptrWeak {
		ot = dsymptrWeakOffLSym(Linksym(s), ot, Linksym(sptr))
	} else {
		ot = dsymptrOffLSym(Linksym(s), ot, Linksym(sptr), 0)
	}

	return ot
}

// typesymname returns the symbol name for type t's descriptor.
func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	if a, bad := algtype1(t); a == ANOEQ && bad.Noalg() {
		name = "noalg." + name
	}
	return name
}

// typesym returns the symbol for type t's descriptor.
func typesym(t *types.Type) *types.Sym {
	return typepkg.Lookup(typesymname(t))
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym {
	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)
}

// typesymprefix returns the symbol "<prefix>.<type name>" in the
// type package.
func typesymprefix(prefix string, t *types.Type) *types.Sym {
	p := prefix + "." + t.ShortString()
	s := typepkg.Lookup(p)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym returns the symbol for t's descriptor and queues t for
// descriptor emission (addsignat).
func typenamesym(t *types.Type) *types.Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typenamesym %v", t)
	}
	s := typesym(t)
	addsignat(t)
	return s
}

// typename returns an expression node of type *byte holding the
// address of t's runtime type descriptor.
func typename(t *types.Type) *Node {
	s := typenamesym(t)
	if s.Def == nil {
		n := newnamel(src.NoXPos, s)
		n.Type = types.Types[TUINT8]
		n.Class = PEXTERN
		n.Typecheck = 1
		s.Def = asTypesNode(n)
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.Typecheck = 1
	return n
}

// itabname returns an expression node of type *byte holding the
// address of the itab for concrete type t and interface type itype,
// queueing the itab for emission on first use.
func itabname(t, itype *types.Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
		Fatalf("itabname(%v, %v)", t, itype)
	}
	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
	if s.Def == nil {
		n := newname(s)
		n.Type = types.Types[TUINT8]
		n.Class = PEXTERN
		n.Typecheck = 1
		s.Def = asTypesNode(n)

		itabs = append(itabs, itabEntry{t: t, itype: itype, sym: s})
	}

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
	n.SetAddable(true)
	n.Typecheck = 1
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
978 func isreflexive(t *types.Type) bool { 979 switch t.Etype { 980 case TBOOL, 981 TINT, 982 TUINT, 983 TINT8, 984 TUINT8, 985 TINT16, 986 TUINT16, 987 TINT32, 988 TUINT32, 989 TINT64, 990 TUINT64, 991 TUINTPTR, 992 TPTR32, 993 TPTR64, 994 TUNSAFEPTR, 995 TSTRING, 996 TCHAN: 997 return true 998 999 case TFLOAT32, 1000 TFLOAT64, 1001 TCOMPLEX64, 1002 TCOMPLEX128, 1003 TINTER: 1004 return false 1005 1006 case TARRAY: 1007 return isreflexive(t.Elem()) 1008 1009 case TSTRUCT: 1010 for _, t1 := range t.Fields().Slice() { 1011 if !isreflexive(t1.Type) { 1012 return false 1013 } 1014 } 1015 return true 1016 1017 default: 1018 Fatalf("bad type for map key: %v", t) 1019 return false 1020 } 1021 } 1022 1023 // needkeyupdate reports whether map updates with t as a key 1024 // need the key to be updated. 1025 func needkeyupdate(t *types.Type) bool { 1026 switch t.Etype { 1027 case TBOOL, 1028 TINT, 1029 TUINT, 1030 TINT8, 1031 TUINT8, 1032 TINT16, 1033 TUINT16, 1034 TINT32, 1035 TUINT32, 1036 TINT64, 1037 TUINT64, 1038 TUINTPTR, 1039 TPTR32, 1040 TPTR64, 1041 TUNSAFEPTR, 1042 TCHAN: 1043 return false 1044 1045 case TFLOAT32, // floats can be +0/-0 1046 TFLOAT64, 1047 TCOMPLEX64, 1048 TCOMPLEX128, 1049 TINTER, 1050 TSTRING: // strings might have smaller backing stores 1051 return true 1052 1053 case TARRAY: 1054 return needkeyupdate(t.Elem()) 1055 1056 case TSTRUCT: 1057 for _, t1 := range t.Fields().Slice() { 1058 if needkeyupdate(t1.Type) { 1059 return true 1060 } 1061 } 1062 return false 1063 1064 default: 1065 Fatalf("bad type for map key: %v", t) 1066 return true 1067 } 1068 } 1069 1070 // formalType replaces byte and rune aliases with real types. 1071 // They've been separate internally to make error messages 1072 // better, but we have to merge them in the reflect tables. 
// formalType replaces the byte and rune alias types with their
// underlying types (uint8 and int32), so that reflect metadata is
// emitted for the real representation rather than the alias.
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
	}
	return t
}

// dtypesym returns the symbol holding the runtime type descriptor for t,
// generating and emitting the descriptor data if it has not been emitted
// yet (tracked via the symbol's Siggen flag). The layout written here must
// stay in sync with the corresponding structs in runtime/type.go.
func dtypesym(t *types.Type) *types.Sym {
	t = formalType(t)
	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	if s.Siggen() {
		// Already generated (or in progress); return the symbol as-is.
		return s
	}
	s.SetSiggen(true)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	// For an unnamed pointer to a named type, the descriptor is keyed
	// off the element type.
	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		// Unnamed types may be emitted by many packages; let the linker dedup.
		dupok = obj.DUPOK
	}

	if myimportpath == "runtime" && (tbase == types.Types[tbase.Etype] || tbase == types.Bytetype || tbase == types.Runetype || tbase == types.Errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local() {
		return s
	}
	if isforw[tbase.Etype] {
		// Forward-declared type: defer to whoever completes it.
		return s
	}

ok:
	// ot tracks the running byte offset within the symbol's data.
	ot := 0
	switch t.Etype {
	default:
		ot = dcommontype(s, ot, t)
		ot = dextratype(s, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := types.NewSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = dsymptr(s, ot, s2, 0)
		ot = duintptr(s, ot, uint64(t.NumElem()))
		ot = dextratype(s, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = dextratype(s, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = duintptr(s, ot, uint64(t.ChanDir()))
		ot = dextratype(s, ot, t, 0)

	case TFUNC:
		// Emit descriptors for all receiver, parameter, and result types first.
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.Isddd()
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(s, ot, t)
		inCount := t.Recvs().NumFields() + t.Params().NumFields()
		outCount := t.Results().NumFields()
		if isddd {
			// Variadic flag is packed into the top bit of outCount.
			outCount |= 1 << 15
		}
		ot = duint16(s, ot, uint16(inCount))
		ot = duint16(s, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		// Trailing *rtype array is accounted for in the uncommon-type offset.
		dataAdd := (inCount + t.Results().NumFields()) * Widthptr
		ot = dextratype(s, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(s, ot, t)

		var tpkg *types.Pkg
		if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(s, ot, tpkg)

		// Method slice header: data pointer (past the uncommon section), len, cap.
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t))
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		dataAdd := imethodSize() * n
		ot = dextratype(s, ot, t, dataAdd)

		lsym := Linksym(s)
		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := exportname(a.name)
			var pkg *types.Pkg
			if !exported && a.pkg != tpkg {
				pkg = a.pkg
			}
			nsym := dname(a.name, "", pkg, exported)

			ot = dsymptrOffLSym(lsym, ot, nsym, 0)
			ot = dsymptrOffLSym(lsym, ot, Linksym(dtypesym(a.type_)), 0)
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Val())
		s3 := dtypesym(mapbucket(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = dsymptr(s, ot, s2, 0)
		ot = dsymptr(s, ot, s3, 0)
		ot = dsymptr(s, ot, s4, 0)
		if t.Key().Width > MAXKEYSIZE {
			// Oversized keys are stored behind a pointer in the bucket.
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Key().Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		if t.Val().Width > MAXVALSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Val().Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		ot = duint16(s, ot, uint16(mapbucket(t).Width))
		ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
		ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
		ot = dextratype(s, ot, t, 0)

	case TPTR32, TPTR64:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(s, ot, t)
			ot = dextratype(s, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = dextratype(s, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for _, t1 := range t.Fields().Slice() {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(s, ot, t)
		pkg := localpkg
		if t.Sym != nil {
			pkg = t.Sym.Pkg
		} else {
			// Unnamed type. Grab the package from the first field, if any.
			for _, f := range t.Fields().Slice() {
				if f.Embedded != 0 {
					continue
				}
				pkg = f.Sym.Pkg
				break
			}
		}
		ot = dgopkgpath(s, ot, pkg)
		// Field slice header: data pointer (past the uncommon section), len, cap.
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t))
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)

		dataAdd := n * structfieldSize()
		ot = dextratype(s, ot, t, dataAdd)

		for _, f := range t.Fields().Slice() {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(s, ot, pkg, f)
			ot = dsymptr(s, ot, dtypesym(f.Type), 0)
			// Field offset is shifted left one bit; the low bit marks
			// an embedded (anonymous) field.
			offsetAnon := uint64(f.Offset) << 1
			if offsetAnon>>1 != uint64(f.Offset) {
				Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
			}
			if f.Embedded != 0 {
				offsetAnon |= 1
			}
			ot = duintptr(s, ot, offsetAnon)
		}
	}

	ot = dextratypeData(s, ot, t)
	ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	s.Lsym.Set(obj.AttrMakeTypelink, keep)

	return s
}

// for each itabEntry, gather the methods on
// the concrete type that implement the interface
func peekitabs() {
	for i := range itabs {
		tab := &itabs[i]
		methods := genfun(tab.t, tab.itype)
		if len(methods) == 0 {
			continue
		}
		tab.lsym = Linksym(tab.sym)
		tab.entries = methods
	}
}

// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {
		return nil
	}
	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	if len(sigs) == 0 {
		return nil
	}

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, Linksym(m.isym))
			sigs = sigs[1:]
			if len(sigs) == 0 {
				break
			}
		}
	}

	return out
}

// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	if it == nil {
		return nil
	}

	// Find the method-table entries recorded for this itab by peekitabs.
	for i := range itabs {
		e := &itabs[i]
		if e.lsym == it {
			syms = e.entries
			break
		}
	}
	if syms == nil {
		return nil
	}

	// keep this arithmetic in sync with *itab layout
	// (three pointer-sized fields plus 8 bytes of hash/flags before fun).
	methodnum := int((offset - 3*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
		return nil
	}
	return syms[methodnum]
}

// addsignat records t (with aliases resolved) as needing a runtime
// type descriptor; dumptypestructs drains this set.
func addsignat(t *types.Type) {
	signatlist[formalType(t)] = true
}

// dumptypestructs emits the runtime type descriptors, itabs, plugin
// tables, and import-path strings accumulated during compilation.
func dumptypestructs() {
	// copy types from externdcl list to signatlist
	for _, n := range externdcl {
		if n.Op == OTYPE {
			addsignat(n.Type)
		}
	}

	// Process signatlist. Use a loop, as dtypesym adds
	// entries to signatlist while it is being processed.
	signats := make([]typeAndStr, len(signatlist))
	for len(signatlist) > 0 {
		signats = signats[:0]
		// Transfer entries to a slice and sort, for reproducible builds.
		for t := range signatlist {
			signats = append(signats, typeAndStr{t: t, s: typesymname(t)})
			delete(signatlist, t)
		}
		sort.Sort(typesByString(signats))
		for _, ts := range signats {
			t := ts.t
			dtypesym(t)
			if t.Sym != nil {
				// Named types also get a descriptor for their pointer type.
				dtypesym(types.NewPtr(t))
			}
		}
	}

	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   link   *itab
		//   hash   uint32
		//   bad    bool
		//   inhash bool
		//   unused [2]byte
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.sym, 0, dtypesym(i.itype), 0)
		o = dsymptr(i.sym, o, dtypesym(i.t), 0)
		o += Widthptr                          // skip link field
		o = duint32(i.sym, o, typehash(i.t))   // copy of type hash
		o += 4                                 // skip bad/inhash/unused fields
		o += len(imethods(i.itype)) * Widthptr // skip fun method pointers
		// at runtime the itab will contain pointers to types, other itabs and
		// method functions. None are allocated on heap, so we can use obj.NOPTR.
		ggloblsym(i.sym, int32(o), int16(obj.DUPOK|obj.NOPTR))

		// Emit a one-pointer link symbol so the linker can collect all itabs.
		ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString())
		dsymptr(ilink, 0, i.sym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// process ptabs
	if localpkg.Name == "main" && len(ptabs) > 0 {
		ot := 0
		s := Ctxt.Lookup("go.plugin.tabs", 0)
		for _, p := range ptabs {
			// Dump ptab symbol into go.pluginsym package.
			//
			// type ptab struct {
			//   name nameOff
			//   typ  typeOff // pointer to symbol
			// }
			nsym := dname(p.s.Name, "", nil, true)
			ot = dsymptrOffLSym(s, ot, nsym, 0)
			ot = dsymptrOffLSym(s, ot, Linksym(dtypesym(p.t)), 0)
		}
		ggloblLSym(s, int32(ot), int16(obj.RODATA))

		ot = 0
		s = Ctxt.Lookup("go.plugin.exports", 0)
		for _, p := range ptabs {
			ot = dsymptrLSym(s, ot, Linksym(p.s), 0)
		}
		ggloblLSym(s, int32(ot), int16(obj.RODATA))
	}

	// generate import strings for imported packages
	for _, p := range pkgs {
		if p.Direct {
			dimportpath(p)
		}
	}

	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in .6 files.
	if myimportpath == "runtime" {
		for i := types.EType(1); i <= TBOOL; i++ {
			dtypesym(types.NewPtr(types.Types[i]))
		}
		dtypesym(types.NewPtr(types.Types[TSTRING]))
		dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(types.NewPtr(types.Errortype))

		dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(mkpkg("main"))
	}
}

// typeAndStr pairs a type with its symbol name, for deterministic sorting.
type typeAndStr struct {
	t *types.Type
	s string
}

// typesByString sorts typeAndStr entries by their name string.
type typesByString []typeAndStr

func (a typesByString) Len() int           { return len(a) }
func (a typesByString) Less(i, j int) bool { return a[i].s < a[j].s }
func (a typesByString) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// dalgsym returns the symbol for t's algorithm table (hash and equality
// closures), emitting the table and closures if needed.
func dalgsym(t *types.Type) *types.Sym {
	var s *types.Sym
	var hashfunc *types.Sym
	var eqfunc *types.Sym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s = typepkg.Lookup(p)

		if s.AlgGen() {
			// Already emitted for this size; reuse.
			return s
		}
		s.SetAlgGen(true)

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = typepkg.Lookup(p)

		ot := 0
		ot = dsymptr(hashfunc, ot, Runtimepkg.Lookup("memhash_varlen"), 0)
		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = typepkg.Lookup(p)

		ot = 0
		ot = dsymptr(eqfunc, ot, Runtimepkg.Lookup("memequal_varlen"), 0)
		ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s = typesymprefix(".alg", t)

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t)
		eqfunc = typesymprefix(".eqfunc", t)

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash, 0)
		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq, 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(s, ot, hashfunc, 0)
	ot = dsymptr(s, ot, eqfunc, 0)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program,
// which can express repetition compactly. In either form, the
// information is used by the runtime to initialize the heap bitmap,
// and for large types (like 128 or more words), they are roughly the
// same speed. GC programs are never much larger and often more
// compact. (If large arrays are involved, they can be arbitrarily
// more compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *types.Type) (sym *types.Sym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	// Small enough for a bitmap: one bit per pointer-sized word,
	// up to maxPtrmaskBytes*8 bits.
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		sym = dgcptrmask(t)
		return
	}

	useGCProg = true
	sym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *types.Type) *types.Sym {
	// One bit per pointer-sized word of ptrdata, rounded up to whole bytes.
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	// Masks are content-addressed, so identical masks share one symbol.
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Runtimepkg.Lookup(p)
	if !sym.Uniq() {
		sym.SetUniq(true)
		for i, x := range ptrmask {
			duint8(sym, i, x)
		}
		ggloblsym(sym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return sym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *types.Type, ptrmask []byte) {
	// Start from an all-zero mask.
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !types.Haspointers(t) {
		return
	}

	// Walk the type once to mark every pointer word in a bit vector,
	// then copy the bits into the byte mask.
	vec := bvalloc(8 * int32(len(ptrmask)))
	xoffset := int64(0)
	onebitwalktype1(t, &xoffset, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if vec.Get(int32(i)) {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *types.Type) (*types.Sym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	sym := typesymprefix(".gcprog", t)
	var p GCProg
	p.init(sym)
	p.emit(t, 0)
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	// Sanity check: the program must cover at least ptrdata and at most
	// the full type width.
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return sym, offset
}

// GCProg writes a GC program into a data symbol via a gcprog.Writer.
type GCProg struct {
	sym    *types.Sym   // symbol receiving the encoded program
	symoff int          // current write offset within sym
	w      gcprog.Writer
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into sym.
func (p *GCProg) init(sym *types.Sym) {
	p.sym = sym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", sym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one encoded program byte to the symbol.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.sym, p.symoff, x)
}

// end finalizes the program, backpatches the length word, and emits the symbol.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.sym, 0, uint32(p.symoff-4))
	ggloblsym(p.sym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.sym)
	}
}

// emit writes program instructions describing the pointer words of t,
// which starts at the given byte offset within the enclosing object.
func (p *GCProg) emit(t *types.Type, offset int64) {
	dowidth(t)
	if !types.Haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		// Pointer-sized type with pointers: exactly one pointer word.
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Interfaces are two pointer words.
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit one element, then a repeat instruction for the rest.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
1817 func zeroaddr(size int64) *Node { 1818 if size >= 1<<31 { 1819 Fatalf("map value too big %d", size) 1820 } 1821 if zerosize < size { 1822 zerosize = size 1823 } 1824 s := mappkg.Lookup("zero") 1825 if s.Def == nil { 1826 x := newname(s) 1827 x.Type = types.Types[TUINT8] 1828 x.Class = PEXTERN 1829 x.Typecheck = 1 1830 s.Def = asTypesNode(x) 1831 } 1832 z := nod(OADDR, asNode(s.Def), nil) 1833 z.Type = types.NewPtr(types.Types[TUINT8]) 1834 z.SetAddable(true) 1835 z.Typecheck = 1 1836 return z 1837 }