// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"fmt"
	"os"
	"sort"
	"strings"
)

// itabEntry records a (concrete type, interface type) pair for which an
// itab symbol must be emitted; entries accumulate in itabs below.
type itabEntry struct {
	t, itype *Type
	sym      *Sym
}

// runtime interface and reflection data structures
var signatlist []*Node     // types needing runtime type descriptors
var itabs []itabEntry      // itab symbols to emit

// A Sig describes one method signature collected for a type's method
// table (built by methods and imethods below).
type Sig struct {
	name   string // method name, without package qualification
	pkg    *Pkg   // method's package for unexported methods; nil for exported
	isym   *Sym   // wrapper called through the interface word (see methods)
	tsym   *Sym   // wrapper called on the value type (see methods)
	type_  *Type  // func type with receiver as first argument
	mtype  *Type  // func type without receiver
	offset int32
}

// byMethodNameAndPackagePath sorts method signatures by name, then package path.
type byMethodNameAndPackagePath []*Sig

func (x byMethodNameAndPackagePath) Len() int      { return len(x) }
func (x byMethodNameAndPackagePath) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byMethodNameAndPackagePath) Less(i, j int) bool {
	return siglt(x[i], x[j])
}

// siglt reports whether a < b
func siglt(a, b *Sig) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.pkg == b.pkg {
		return false
	}
	// A nil pkg (exported method) sorts before any named package.
	if a.pkg == nil {
		return true
	}
	if b.pkg == nil {
		return false
	}
	return a.pkg.Path < b.pkg.Path
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
const (
	BUCKETSIZE = 8   // number of entries per bucket
	MAXKEYSIZE = 128 // keys wider than this are stored indirectly (as pointers)
	MAXVALSIZE = 128 // values wider than this are stored indirectly
)

func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
func imethodSize() int     { return 4 + 4 }        // Sizeof(runtime.imethod{})
func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{})
	// Unnamed types with no methods carry no uncommontype record.
	if t.Sym == nil && len(methods(t)) == 0 {
		return 0
	}
	return 4 + 2 + 2 + 4 + 4
}

// makefield returns a struct Field with the given name and type,
// looked up in the anonymous package.
func makefield(name string, t *Type) *Field {
	f := newField()
	f.Type = t
	f.Sym = nopkg.Lookup(name)
	return f
}

// mapbucket builds (and caches on t) the bucket struct type for map type t.
func mapbucket(t *Type) *Type {
	if t.MapType().Bucket != nil {
		return t.MapType().Bucket
	}

	bucket := typ(TSTRUCT)
	keytype := t.Key()
	valtype := t.Val()
	dowidth(keytype)
	dowidth(valtype)
	// Large keys/values are stored indirectly, as pointers.
	if keytype.Width > MAXKEYSIZE {
		keytype = Ptrto(keytype)
	}
	if valtype.Width > MAXVALSIZE {
		valtype = Ptrto(valtype)
	}

	field := make([]*Field, 0, 5)

	// The first field is: uint8 topbits[BUCKETSIZE].
	arr := typArray(Types[TUINT8], BUCKETSIZE)
	field = append(field, makefield("topbits", arr))

	arr = typArray(keytype, BUCKETSIZE)
	arr.Noalg = true
	field = append(field, makefield("keys", arr))

	arr = typArray(valtype, BUCKETSIZE)
	arr.Noalg = true
	field = append(field, makefield("values", arr))

	// Make sure the overflow pointer is the last memory in the struct,
	// because the runtime assumes it can use size-ptrSize as the
	// offset of the overflow pointer. We double-check that property
	// below once the offsets and size are computed.
	//
	// BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
	// On 32-bit systems, the max alignment is 32-bit, and the
	// overflow pointer will add another 32-bit field, and the struct
	// will end with no padding.
	// On 64-bit systems, the max alignment is 64-bit, and the
	// overflow pointer will add another 64-bit field, and the struct
	// will end with no padding.
	// On nacl/amd64p32, however, the max alignment is 64-bit,
	// but the overflow pointer will add only a 32-bit field,
	// so if the struct needs 64-bit padding (because a key or value does)
	// then it would end with an extra 32-bit padding field.
	// Preempt that by emitting the padding here.
	if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr {
		field = append(field, makefield("pad", Types[TUINTPTR]))
	}

	// If keys and values have no pointers, the map implementation
	// can keep a list of overflow pointers on the side so that
	// buckets can be marked as having no pointers.
	// Arrange for the bucket to have no pointers by changing
	// the type of the overflow field to uintptr in this case.
	// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
	otyp := Ptrto(bucket)
	if !haspointers(t.Val()) && !haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
		otyp = Types[TUINTPTR]
	}
	ovf := makefield("overflow", otyp)
	field = append(field, ovf)

	// link up fields
	bucket.Noalg = true
	bucket.Local = t.Local
	bucket.SetFields(field[:])
	dowidth(bucket)

	// Double-check that overflow field is final memory in struct,
	// with no padding at end. See comment above.
	if ovf.Offset != bucket.Width-int64(Widthptr) {
		Yyerror("bad math in mapbucket for %v", t)
	}

	t.MapType().Bucket = bucket

	bucket.StructType().Map = t
	return bucket
}

// Builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
func hmap(t *Type) *Type {
	// Cached on the map type after first construction.
	if t.MapType().Hmap != nil {
		return t.MapType().Hmap
	}

	bucket := mapbucket(t)
	var field [8]*Field
	field[0] = makefield("count", Types[TINT])
	field[1] = makefield("flags", Types[TUINT8])
	field[2] = makefield("B", Types[TUINT8])
	field[3] = makefield("hash0", Types[TUINT32])
	field[4] = makefield("buckets", Ptrto(bucket))
	field[5] = makefield("oldbuckets", Ptrto(bucket))
	field[6] = makefield("nevacuate", Types[TUINTPTR])
	field[7] = makefield("overflow", Types[TUNSAFEPTR])

	h := typ(TSTRUCT)
	h.Noalg = true
	h.Local = t.Local
	h.SetFields(field[:])
	dowidth(h)
	t.MapType().Hmap = h
	h.StructType().Map = t
	return h
}

// hiter builds (and caches on t) the struct type describing the
// runtime's map iterator for map type t.
func hiter(t *Type) *Type {
	if t.MapType().Hiter != nil {
		return t.MapType().Hiter
	}

	// build a struct:
	// hiter {
	//    key *Key
	//    val *Value
	//    t *MapType
	//    h *Hmap
	//    buckets *Bucket
	//    bptr *Bucket
	//    overflow0 unsafe.Pointer
	//    overflow1 unsafe.Pointer
	//    startBucket uintptr
	//    stuff uintptr
	//    bucket uintptr
	//    checkBucket uintptr
	// }
	// must match ../../../../runtime/hashmap.go:hiter.
	var field [12]*Field
	field[0] = makefield("key", Ptrto(t.Key()))
	field[1] = makefield("val", Ptrto(t.Val()))
	field[2] = makefield("t", Ptrto(Types[TUINT8]))
	field[3] = makefield("h", Ptrto(hmap(t)))
	field[4] = makefield("buckets", Ptrto(mapbucket(t)))
	field[5] = makefield("bptr", Ptrto(mapbucket(t)))
	field[6] = makefield("overflow0", Types[TUNSAFEPTR])
	field[7] = makefield("overflow1", Types[TUNSAFEPTR])
	field[8] = makefield("startBucket", Types[TUINTPTR])
	field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
	field[10] = makefield("bucket", Types[TUINTPTR])
	field[11] = makefield("checkBucket", Types[TUINTPTR])

	// build iterator struct holding the above fields
	i := typ(TSTRUCT)
	i.Noalg = true
	i.SetFields(field[:])
	dowidth(i)
	// Sanity check: the runtime's hiter is 12 pointer-words wide.
	if i.Width != int64(12*Widthptr) {
		Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
	}
	t.MapType().Hiter = i
	i.StructType().Map = t
	return i
}

// f is method type, with receiver.
// return function type, receiver as first argument (or not).
func methodfunc(f *Type, receiver *Type) *Type {
	var in []*Node
	if receiver != nil {
		d := Nod(ODCLFIELD, nil, nil)
		d.Type = receiver
		in = append(in, d)
	}

	var d *Node
	for _, t := range f.Params().Fields().Slice() {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		d.Isddd = t.Isddd
		in = append(in, d)
	}

	var out []*Node
	for _, t := range f.Results().Fields().Slice() {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		out = append(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname() != nil {
		// Link to name of original method function.
		t.SetNname(f.Nname())
	}

	return t
}

// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *Type) []*Sig {
	// method type
	mt := methtype(t, 0)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = Ptrto(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var ms []*Sig
	for _, f := range mt.AllMethods().Slice() {
		if f.Type.Etype != TFUNC || f.Type.Recv() == nil {
			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
		}
		// NOTE(review): this check is unreachable — the Recv() == nil
		// case is already caught by the Fatalf above.
		if f.Type.Recv() == nil {
			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Nointerface {
			continue
		}

		method := f.Sym
		if method == nil {
			continue
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		this := f.Type.Recv().Type

		if this.IsPtr() && this.Elem() == t {
			continue
		}
		if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) {
			continue
		}

		var sig Sig
		ms = append(ms, &sig)

		sig.name = method.Name
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatalf("methods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.isym = methodsym(method, it, 1)
		sig.tsym = methodsym(method, t, 0)
		sig.type_ = methodfunc(f.Type, t)
		sig.mtype = methodfunc(f.Type, nil)

		// Generate the interface-word wrapper once per symbol
		// (SymSiggen marks it as done).
		if sig.isym.Flags&SymSiggen == 0 {
			sig.isym.Flags |= SymSiggen
			if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
				compiling_wrappers = 1
				genwrapper(it, f, sig.isym, 1)
				compiling_wrappers = 0
			}
		}

		// Likewise for the value-type wrapper.
		if sig.tsym.Flags&SymSiggen == 0 {
			sig.tsym.Flags |= SymSiggen
			if !Eqtype(this, t) {
				compiling_wrappers = 1
				genwrapper(t, f, sig.tsym, 0)
				compiling_wrappers = 0
			}
		}
	}

	sort.Sort(byMethodNameAndPackagePath(ms))
	return ms
}

// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *Type) []*Sig {
	var methods []*Sig
	for _, f := range t.Fields().Slice() {
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		method := f.Sym
		var sig = Sig{
			name: method.Name,
		}
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatalf("imethods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.mtype = f.Type
		sig.offset = 0
		sig.type_ = methodfunc(f.Type, nil)

		// Interface methods must already be sorted (sortinter);
		// verify the invariant as we go.
		if n := len(methods); n > 0 {
			last := methods[n-1]
			if !(siglt(last, &sig)) {
				Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name)
			}
		}
		methods = append(methods, &sig)

		// Compiler can only refer to wrappers for non-blank methods.
		if isblanksym(method) {
			continue
		}

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym := methodsym(method, t, 0)

		if isym.Flags&SymSiggen == 0 {
			isym.Flags |= SymSiggen
			genwrapper(t, f, isym, 0)
		}
	}

	return methods
}

// dimportpath emits (once) the import-path name symbol for package p
// and records it in p.Pathsym.
func dimportpath(p *Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	var str string
	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		str = myimportpath
	} else {
		str = p.Path
	}

	s := obj.Linklookup(Ctxt, "type..importpath."+p.Prefix+".", 0)
	ot := dnameData(s, 0, str, "", nil, false)
	ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA)
	p.Pathsym = s
}

// dgopkgpath writes a pointer-sized reference to pkg's path symbol into s at ot.
func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
	return dgopkgpathLSym(Linksym(s), ot, pkg)
}

func dgopkgpathLSym(s *obj.LSym, ot int, pkg *Pkg) int {
	if pkg == nil {
		return duintxxLSym(s, ot, 0, Widthptr)
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := obj.Linklookup(Ctxt, `type..importpath."".`, 0)
		return dsymptrLSym(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptrLSym(s, ot, pkg.Pathsym, 0)
}

// dgopkgpathOffLSym writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *Pkg) int {
	if pkg == nil {
		return duintxxLSym(s, ot, 0, 4)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := obj.Linklookup(Ctxt, `type..importpath."".`, 0)
		return dsymptrOffLSym(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptrOffLSym(s, ot, pkg.Pathsym, 0)
}

// isExportedField reports whether a struct field is exported.
func isExportedField(ft *Field) bool {
	if ft.Sym != nil && ft.Embedded == 0 {
		// Named (non-embedded) field: exportedness of the field name.
		return exportname(ft.Sym.Name)
	} else {
		// Embedded field: exportedness of the type name.
		if ft.Type.Sym != nil &&
			(ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) {
			return false
		} else {
			return true
		}
	}
}

// dnameField dumps a reflect.name for a struct field.
func dnameField(s *Sym, ot int, ft *Field) int {
	var name string
	if ft.Sym != nil && ft.Embedded == 0 {
		name = ft.Sym.Name
	}
	nsym := dname(name, ft.Note, nil, isExportedField(ft))
	return dsymptrLSym(Linksym(s), ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *Pkg, exported bool) int {
	// Name and tag lengths are encoded in 16 bits each.
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8) // big-endian 16-bit length
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	if pkg != nil {
		ot = dgopkgpathOffLSym(s, ot, pkg)
	}

	return ot
}

// dnameCount numbers the non-shareable (package-qualified) name symbols.
var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			sname += name + "." + tag
		}
	} else {
		// Package-qualified names are not shareable; use a counter
		// to make the symbol name unique within this package.
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := obj.Linklookup(Ctxt, sname, 0)
	// Already written (shared with an earlier use)?
	if len(s.P) > 0 {
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(s *Sym, ot int, t *Type, dataAdd int) int {
	m := methods(t)
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %s", t)
	}

	// Ensure the type descriptors of all methods exist.
	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOffLSym(Linksym(s), ot, typePkg(t))

	dataAdd += uncommonSize(t)
	mcount := len(m)
	// mcount and dataAdd must fit the uint16/uint32 fields below.
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %s: %d", t, mcount)
	}
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %s: %d", t, dataAdd)
	}

	ot = duint16(s, ot, uint16(mcount))
	ot = duint16(s, ot, 0)
	ot = duint32(s, ot, uint32(dataAdd))
	ot = duint32(s, ot, 0)
	return ot
}

// typePkg returns the package a type's name (or its element type's
// name, for pointer/array/slice/chan types) belongs to, or nil for
// predeclared and error types.
func typePkg(t *Type) *Pkg {
	tsym := t.Sym
	if tsym == nil {
		switch t.Etype {
		case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != Types[t.Etype] && t != errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(s *Sym, ot int, t *Type) int {
	lsym := Linksym(s)
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := exportname(a.name)
		var pkg *Pkg
		// Only record the package when it differs from the type's own.
		if !exported && a.pkg != typePkg(t) {
			pkg = a.pkg
		}
		nsym := dname(a.name, "", pkg, exported)

		ot = dsymptrOffLSym(lsym, ot, nsym, 0)
		ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype)))
		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym))
		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym))
	}
	return ot
}

// dmethodptrOffLSym writes a 4-byte R_METHODOFF relocation in s at ot
// referring to x, and returns the advanced offset.
func dmethodptrOffLSym(s *obj.LSym, ot int, x *obj.LSym) int {
	duintxxLSym(s, ot, 0, 4) // reserve the 4 bytes the relocation fills
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = obj.R_METHODOFF
	return ot + 4
}

// kinds maps compiler type etypes to the runtime's reflect kind codes.
var kinds = []int{
	TINT:        obj.KindInt,
	TUINT:       obj.KindUint,
	TINT8:       obj.KindInt8,
	TUINT8:      obj.KindUint8,
	TINT16:      obj.KindInt16,
	TUINT16:     obj.KindUint16,
	TINT32:      obj.KindInt32,
	TUINT32:     obj.KindUint32,
	TINT64:      obj.KindInt64,
	TUINT64:     obj.KindUint64,
	TUINTPTR:    obj.KindUintptr,
	TFLOAT32:    obj.KindFloat32,
	TFLOAT64:    obj.KindFloat64,
	TBOOL:       obj.KindBool,
	TSTRING:     obj.KindString,
	TPTR32:      obj.KindPtr,
	TPTR64:      obj.KindPtr,
	TSTRUCT:     obj.KindStruct,
	TINTER:      obj.KindInterface,
	TCHAN:       obj.KindChan,
	TMAP:        obj.KindMap,
	TARRAY:      obj.KindArray,
	TSLICE:      obj.KindSlice,
	TFUNC:       obj.KindFunc,
	TCOMPLEX64:  obj.KindComplex64,
	TCOMPLEX128: obj.KindComplex128,
	TUNSAFEPTR:  obj.KindUnsafePointer,
}

// haspointers reports whether values of type t contain pointers.
// Results for arrays and structs are cached on the type
// (Haspointers: 0 = unknown, 1 = no, 2 = yes).
func haspointers(t *Type) bool {
	switch t.Etype {
	case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
		TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL:
		return false

	case TSLICE:
		return true

	case TARRAY:
		at := t.Extra.(*ArrayType)
		if at.Haspointers != 0 {
			return at.Haspointers-1 != 0
		}

		ret := false
		if t.NumElem() != 0 { // non-empty array
			ret = haspointers(t.Elem())
		}

		at.Haspointers = 1 + uint8(obj.Bool2int(ret))
		return ret

	case TSTRUCT:
		st := t.StructType()
		if st.Haspointers != 0 {
			return st.Haspointers-1 != 0
		}

		ret := false
		for _, t1 := range t.Fields().Slice() {
			if haspointers(t1.Type) {
				ret = true
				break
			}
		}
		st.Haspointers = 1 + uint8(obj.Bool2int(ret))
		return ret
	}

	// Pointers, strings, interfaces, chans, maps, funcs, unsafe.Pointer.
	return true
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *Type) int64 {
	if !haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab;	void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		var lastPtrField *Field
		for _, t1 := range t.Fields().Slice() {
			if haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
	tflagNamed     = 1 << 2
)

// dcommontype_algarray caches the runtime.algarray symbol.
var dcommontype_algarray *Sym

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(s *Sym, ot int, t *Type) int {
	// The common header is always written at the start of the symbol.
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if dcommontype_algarray == nil {
		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *Sym
	if alg == ASPECIAL || alg == AMEM {
		algsym = dalgsym(t)
	}

	// ptrToThis: only emitted for named types or pointer types
	// with methods.
	var sptr *Sym
	tptr := Ptrto(t)
	if !t.IsPtr() && (t.Sym != nil || methods(tptr) != nil) {
		sptr = dtypesym(tptr)
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	//	type rtype struct {
	//		size          uintptr
	//		ptrdata       uintptr
	//		hash          uint32
	//		tflag         tflag
	//		align         uint8
	//		fieldAlign    uint8
	//		kind          uint8
	//		alg           *typeAlg
	//		gcdata        *byte
	//		str           nameOff
	//		ptrToThis     typeOff
	//	}
	ot = duintptr(s, ot, uint64(t.Width))
	ot = duintptr(s, ot, uint64(ptrdata))

	ot = duint32(s, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	if t.Sym != nil && t.Sym.Name != "" {
		tflag |= tflagNamed
	}

	exported := false
	p := Tconv(t, FmtLeft|FmtUnsigned)
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = exportname(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = exportname(t.Elem().Sym.Name)
		}
	}

	ot = duint8(s, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(s, ot, t.Align) // align
	ot = duint8(s, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if !haspointers(t) {
		i |= obj.KindNoPointers
	}
	if isdirectiface(t) {
		i |= obj.KindDirectIface
	}
	if useGCProg {
		i |= obj.KindGCProg
	}
	ot = duint8(s, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(s, ot, dcommontype_algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(s, ot, algsym, 0)
	}
	ot = dsymptr(s, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOffLSym(Linksym(s), ot, nsym, 0) // str
	if sptr == nil {
		ot = duint32(s, ot, 0)
	} else {
		ot = dsymptrOffLSym(Linksym(s), ot, Linksym(sptr), 0) // ptrToThis
	}

	return ot
}

// typesym returns the symbol holding t's runtime type descriptor.
func typesym(t *Type) *Sym {
	return Pkglookup(Tconv(t, FmtLeft), typepkg)
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *Type, f *Field) *Sym {
	return Pkglookup(Tconv(t, FmtLeft)+"."+f.Sym.Name, trackpkg)
}

func typelinkLSym(t *Type) *obj.LSym {
	name := "go.typelink." + Tconv(t, FmtLeft) // complete, unambiguous type name
	return obj.Linklookup(Ctxt, name, 0)
}

// typesymprefix returns the symbol "<prefix>.<type name>" in typepkg.
func typesymprefix(prefix string, t *Type) *Sym {
	p := prefix + "." + Tconv(t, FmtLeft)
	s := Pkglookup(p, typepkg)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym returns the symbol for t's type descriptor, creating its
// declaration and queueing t on signatlist on first use.
func typenamesym(t *Type) *Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typename %v", t)
	}
	s := typesym(t)
	if s.Def == nil {
		n := newname(s)
		n.Type = Types[TUINT8]
		n.Class = PEXTERN
		n.Typecheck = 1
		s.Def = n

		signatlist = append(signatlist, typenod(t))
	}

	return s.Def.Sym
}

// typename returns a Node of the address of t's type descriptor.
func typename(t *Type) *Node {
	s := typenamesym(t)
	n := Nod(OADDR, s.Def, nil)
	n.Type = Ptrto(s.Def.Type)
	n.Addable = true
	n.Ullman = 2
	n.Typecheck = 1
	return n
}

// itabname returns a Node of the address of the itab symbol for
// concrete type t and interface type itype, queueing the itab for
// emission on first use.
func itabname(t, itype *Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("itabname %v", t)
	}
	s := Pkglookup(Tconv(t, FmtLeft)+","+Tconv(itype, FmtLeft), itabpkg)
	if s.Def == nil {
		n := newname(s)
		n.Type = Types[TUINT8]
		n.Class = PEXTERN
		n.Typecheck = 1
		s.Def = n

		itabs = append(itabs, itabEntry{t: t, itype: itype, sym: s})
	}

	n := Nod(OADDR, s.Def, nil)
	n.Type = Ptrto(s.Def.Type)
	n.Addable = true
	n.Ullman = 2
	n.Typecheck = 1
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
func isreflexive(t *Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TSTRING,
		TCHAN:
		return true

	// Floats (NaN != NaN) and anything containing them are not reflexive.
	case TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER:
		return false

	case TARRAY:
		return isreflexive(t.Elem())

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			if !isreflexive(t1.Type) {
				return false
			}
		}
		return true

	default:
		Fatalf("bad type for map key: %v", t)
		return false
	}
}

// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TCHAN:
		return false

	case TFLOAT32, // floats can be +0/-0
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER,
		TSTRING: // strings might have smaller backing stores
		return true

	case TARRAY:
		return needkeyupdate(t.Elem())

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			if needkeyupdate(t1.Type) {
				return true
			}
		}
		return false

	default:
		Fatalf("bad type for map key: %v", t)
		return true
	}
}

// dtypesym emits (once) the runtime type descriptor for t and returns
// its symbol. The layout written must match runtime/type.go.
func dtypesym(t *Type) *Sym {
	// Replace byte, rune aliases with real type.
	// They've been separate internally to make error messages
	// better, but we have to merge them in the reflect tables.
	if t == bytetype || t == runetype {
		t = Types[t.Etype]
	}

	if t.IsUntyped() {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	if s.Flags&SymSiggen != 0 {
		return s
	}
	s.Flags |= SymSiggen

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
		tbase = t.Elem()
	}
	dupok := 0
	if tbase.Sym == nil {
		dupok = obj.DUPOK
	}

	if myimportpath == "runtime" && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local {
		return s
	}
	if isforw[tbase.Etype] {
		return s
	}

ok:
	ot := 0
	switch t.Etype {
	default:
		ot = dcommontype(s, ot, t)
		ot = dextratype(s, ot, t, 0)

	case TARRAY:
		// ../../../../runtime/type.go:/arrayType
		s1 := dtypesym(t.Elem())
		t2 := typSlice(t.Elem())
		s2 := dtypesym(t2)
		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = dsymptr(s, ot, s2, 0)
		ot = duintptr(s, ot, uint64(t.NumElem()))
		ot = dextratype(s, ot, t, 0)

	case TSLICE:
		// ../../../../runtime/type.go:/sliceType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = dextratype(s, ot, t, 0)

	case TCHAN:
		// ../../../../runtime/type.go:/chanType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = duintptr(s, ot, uint64(t.ChanDir()))
		ot = dextratype(s, ot, t, 0)

	case TFUNC:
		for _, t1 := range t.Recvs().Fields().Slice() {
			dtypesym(t1.Type)
		}
		isddd := false
		for _, t1 := range t.Params().Fields().Slice() {
			isddd = t1.Isddd
			dtypesym(t1.Type)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			dtypesym(t1.Type)
		}

		ot = dcommontype(s, ot, t)
		inCount := t.Recvs().NumFields() + t.Params().NumFields()
		outCount := t.Results().NumFields()
		// Variadic functions set the top bit of outCount.
		if isddd {
			outCount |= 1 << 15
		}
		ot = duint16(s, ot, uint16(inCount))
		ot = duint16(s, ot, uint16(outCount))
		if Widthptr == 8 {
			ot += 4 // align for *rtype
		}

		dataAdd := (inCount + t.Results().NumFields()) * Widthptr
		ot = dextratype(s, ot, t, dataAdd)

		// Array of rtype pointers follows funcType.
		for _, t1 := range t.Recvs().Fields().Slice() {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Params().Fields().Slice() {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
		}
		for _, t1 := range t.Results().Fields().Slice() {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../../runtime/type.go:/interfaceType
		ot = dcommontype(s, ot, t)

		var tpkg *Pkg
		if t.Sym != nil && t != Types[t.Etype] && t != errortype {
			tpkg = t.Sym.Pkg
		}
		ot = dgopkgpath(s, ot, tpkg)

		// The []imethod slice header points just past the
		// uncommontype record within this same symbol.
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t))
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		dataAdd := imethodSize() * n
		ot = dextratype(s, ot, t, dataAdd)

		lsym := Linksym(s)
		for _, a := range m {
			// ../../../../runtime/type.go:/imethod
			exported := exportname(a.name)
			var pkg *Pkg
			if !exported && a.pkg != tpkg {
				pkg = a.pkg
			}
			nsym := dname(a.name, "", pkg, exported)

			ot = dsymptrOffLSym(lsym, ot, nsym, 0)
			ot = dsymptrOffLSym(lsym, ot, Linksym(dtypesym(a.type_)), 0)
		}

	// ../../../../runtime/type.go:/mapType
	case TMAP:
		s1 := dtypesym(t.Key())
		s2 := dtypesym(t.Val())
		s3 := dtypesym(mapbucket(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = dsymptr(s, ot, s2, 0)
		ot = dsymptr(s, ot, s3, 0)
		ot = dsymptr(s, ot, s4, 0)
		if t.Key().Width > MAXKEYSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Key().Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		if t.Val().Width > MAXVALSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Val().Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		ot = duint16(s, ot, uint16(mapbucket(t).Width))
		ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
		ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
		ot = dextratype(s, ot, t, 0)

	case TPTR32, TPTR64:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(s, ot, t)
			ot = dextratype(s, ot, t, 0)

			break
		}

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())

		ot = dcommontype(s, ot, t)
		ot = dsymptr(s, ot, s1, 0)
		ot = dextratype(s, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for _, t1 := range t.Fields().Slice() {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(s, ot, t)
		pkg := localpkg
		if t.Sym != nil {
			pkg = t.Sym.Pkg
		} else {
			// Unnamed type. Grab the package from the first field, if any.
			for _, f := range t.Fields().Slice() {
				if f.Embedded != 0 {
					continue
				}
				pkg = f.Sym.Pkg
				break
			}
		}
		ot = dgopkgpath(s, ot, pkg)
		// The []structfield slice header points just past the
		// uncommontype record within this same symbol.
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t))
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)

		dataAdd := n * structfieldSize()
		ot = dextratype(s, ot, t, dataAdd)

		for _, f := range t.Fields().Slice() {
			// ../../../../runtime/type.go:/structField
			ot = dnameField(s, ot, f)
			ot = dsymptr(s, ot, dtypesym(f.Type), 0)
			ot = duintptr(s, ot, uint64(f.Offset))
		}
	}

	ot = dextratypeData(s, ot, t)
	ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))

	// generate typelink.foo pointing at s = type.foo.
	//
	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	if keep {
		slink := typelinkLSym(t)
		dsymptrOffLSym(slink, 0, Linksym(s), 0)
		ggloblLSym(slink, 4, int16(dupok|obj.RODATA))
	}

	return s
}

func dumptypestructs() {
	// copy types from externdcl list to signatlist
	for _, n := range externdcl {
		if n.Op != OTYPE {
			continue
		}
		signatlist = append(signatlist, n)
	}

	// Process signatlist.
	// This can't use range, as entries are
	// added to the list while it is being processed
	// (dtypesym may append new types to signatlist).
	for i := 0; i < len(signatlist); i++ {
		n := signatlist[i]
		if n.Op != OTYPE {
			continue
		}
		t := n.Type
		dtypesym(t)
		if t.Sym != nil {
			// For named types also emit the pointer type, since
			// reflect may need *T even if the program never forms it.
			dtypesym(Ptrto(t))
		}
	}

	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//	inter  *interfacetype
		//	_type  *_type
		//	link   *itab
		//	bad    int32
		//	unused int32
		//	fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.sym, 0, dtypesym(i.itype), 0)
		o = dsymptr(i.sym, o, dtypesym(i.t), 0)
		o += Widthptr + 8                      // skip link/bad/unused fields
		o += len(imethods(i.itype)) * Widthptr // skip fun method pointers
		// at runtime the itab will contain pointers to types, other itabs and
		// method functions. None are allocated on heap, so we can use obj.NOPTR.
		ggloblsym(i.sym, int32(o), int16(obj.DUPOK|obj.NOPTR))

		ilink := Pkglookup(Tconv(i.t, FmtLeft)+","+Tconv(i.itype, FmtLeft), itablinkpkg)
		dsymptr(ilink, 0, i.sym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// generate import strings for imported packages
	if forceObjFileStability {
		// Sorting the packages is not necessary but to compare binaries created
		// using textual and binary format we sort by path to reduce differences.
		sort.Sort(pkgByPath(pkgs))
	}
	for _, p := range pkgs {
		if p.Direct {
			dimportpath(p)
		}
	}

	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in .6 files.
	if myimportpath == "runtime" {
		for i := EType(1); i <= TBOOL; i++ {
			dtypesym(Ptrto(Types[i]))
		}
		dtypesym(Ptrto(Types[TSTRING]))
		dtypesym(Ptrto(Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(Ptrto(errortype))

		dtypesym(functype(nil, []*Node{Nod(ODCLFIELD, nil, typenod(errortype))}, []*Node{Nod(ODCLFIELD, nil, typenod(Types[TSTRING]))}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(mkpkg("main"))
	}
}

// pkgByPath sorts packages by import path. It is used only under
// forceObjFileStability above, to make object-file output reproducible.
type pkgByPath []*Pkg

func (a pkgByPath) Len() int           { return len(a) }
func (a pkgByPath) Less(i, j int) bool { return a[i].Path < a[j].Path }
func (a pkgByPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// dalgsym returns the symbol of the algorithm table (hash and equality
// function closures) for type t, emitting it on first use.
func dalgsym(t *Type) *Sym {
	var s *Sym
	var hashfunc *Sym
	var eqfunc *Sym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).
	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s = Pkglookup(p, typepkg)

		// The shared per-size table only needs to be emitted once.
		if s.Flags&SymAlgGen != 0 {
			return s
		}
		s.Flags |= SymAlgGen

		// make hash closure: a funcval pointing at the generic
		// variable-length memory hash, with the size baked in after it.
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = Pkglookup(p, typepkg)

		ot := 0
		ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0)
		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = Pkglookup(p, typepkg)

		ot = 0
		ot = dsymptr(eqfunc, ot, Pkglookup("memequal_varlen", Runtimepkg), 0)
		ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s = typesymprefix(".alg", t)

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t)
		eqfunc = typesymprefix(".eqfunc", t)

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash, 0)

		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq, 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(s, ot, hashfunc, 0)
	ot = dsymptr(s, ot, eqfunc, 0)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// 16 bytes is enough to describe 128 pointer-sized words, 512 or 1024 bytes
// depending on the system. Above this length, the GC information is
// recorded as a GC program, which can express repetition compactly.
// In either form, the information is used by the runtime to initialize the
// heap bitmap, and for large types (like 128 or more words), they are
// roughly the same speed. GC programs are never much larger and often
// more compact. (If large arrays are involved, they can be arbitrarily more
// compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
1557 // 1558 const maxPtrmaskBytes = 2048 1559 1560 // dgcsym emits and returns a data symbol containing GC information for type t, 1561 // along with a boolean reporting whether the UseGCProg bit should be set in 1562 // the type kind, and the ptrdata field to record in the reflect type information. 1563 func dgcsym(t *Type) (sym *Sym, useGCProg bool, ptrdata int64) { 1564 ptrdata = typeptrdata(t) 1565 if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 { 1566 sym = dgcptrmask(t) 1567 return 1568 } 1569 1570 useGCProg = true 1571 sym, ptrdata = dgcprog(t) 1572 return 1573 } 1574 1575 // dgcptrmask emits and returns the symbol containing a pointer mask for type t. 1576 func dgcptrmask(t *Type) *Sym { 1577 ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8) 1578 fillptrmask(t, ptrmask) 1579 p := fmt.Sprintf("gcbits.%x", ptrmask) 1580 1581 sym := Pkglookup(p, Runtimepkg) 1582 if sym.Flags&SymUniq == 0 { 1583 sym.Flags |= SymUniq 1584 for i, x := range ptrmask { 1585 duint8(sym, i, x) 1586 } 1587 ggloblsym(sym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL) 1588 } 1589 return sym 1590 } 1591 1592 // fillptrmask fills in ptrmask with 1s corresponding to the 1593 // word offsets in t that hold pointers. 1594 // ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits. 1595 func fillptrmask(t *Type, ptrmask []byte) { 1596 for i := range ptrmask { 1597 ptrmask[i] = 0 1598 } 1599 if !haspointers(t) { 1600 return 1601 } 1602 1603 vec := bvalloc(8 * int32(len(ptrmask))) 1604 xoffset := int64(0) 1605 onebitwalktype1(t, &xoffset, vec) 1606 1607 nptr := typeptrdata(t) / int64(Widthptr) 1608 for i := int64(0); i < nptr; i++ { 1609 if bvget(vec, int32(i)) == 1 { 1610 ptrmask[i/8] |= 1 << (uint(i) % 8) 1611 } 1612 } 1613 } 1614 1615 // dgcprog emits and returns the symbol containing a GC program for type t 1616 // along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]). 
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *Type) (*Sym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	sym := typesymprefix(".gcprog", t)
	var p GCProg
	p.init(sym)
	p.emit(t, 0)
	// BitIndex reports how many words the emitted program covers;
	// convert to bytes and sanity-check against the type's layout.
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return sym, offset
}

// GCProg accumulates a GC program (see cmd/internal/gcprog) into a data
// symbol, one byte at a time via writeByte.
type GCProg struct {
	sym    *Sym          // symbol receiving the encoded program
	symoff int           // current write offset within sym
	w      gcprog.Writer // program encoder; calls writeByte as it emits
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into sym, reserving the first
// 4 bytes for the program length (filled in by end).
func (p *GCProg) init(sym *Sym) {
	p.sym = sym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", sym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one encoded program byte to the symbol.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.sym, p.symoff, x)
}

// end finishes the program, backfills the 4-byte length prefix,
// and declares the symbol.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.sym, 0, uint32(p.symoff-4))
	ggloblsym(p.sym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.sym)
	}
}

// emit writes program instructions describing the pointer slots of t,
// which is laid out starting at the given byte offset.
func (p *GCProg) emit(t *Type, offset int64) {
	dowidth(t)
	if !haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		// Exactly one word: it must be the pointer itself.
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		// Only the data word of the string header is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Both words of an interface header are pointers.
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		// Only the data word of the slice header is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit one element, then a repeat instruction for the rest.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
func zeroaddr(size int64) *Node {
	if size >= 1<<31 {
		Fatalf("map value too big %d", size)
	}
	// Track the largest zero region requested; presumably the symbol is
	// sized from zerosize when it is finally emitted — defined elsewhere.
	if zerosize < size {
		zerosize = size
	}
	s := Pkglookup("zero", mappkg)
	if s.Def == nil {
		// First use: declare the backing name as an external byte symbol.
		x := newname(s)
		x.Type = Types[TUINT8]
		x.Class = PEXTERN
		x.Typecheck = 1
		s.Def = x
	}
	// Return &zero as a *uint8-typed, addressable, typechecked node.
	z := Nod(OADDR, s.Def, nil)
	z.Type = Ptrto(Types[TUINT8])
	z.Addable = true
	z.Typecheck = 1
	return z
}