github.com/mh-cbon/go@v0.0.0-20160603070303-9e112a3fe4c0/src/cmd/compile/internal/gc/reflect.go (about)
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"fmt"
	"os"
	"sort"
	"strings"
)

// itabEntry records a pending itab to emit: the concrete type t,
// the interface type itype, and the symbol sym the itab lives under.
type itabEntry struct {
	t, itype *Type
	sym      *Sym
}

// runtime interface and reflection data structures
var signatlist []*Node // types whose runtime type descriptors must be emitted
var itabs []itabEntry  // itabs to be emitted

// Sig holds one method signature for the reflect metadata:
// the method name, its package (nil for exported names),
// the method symbols for the interface-word receiver (isym)
// and the plain receiver (tsym), the method type with receiver
// as first argument (type_) and without receiver (mtype),
// and an offset used by the interface layout.
type Sig struct {
	name   string
	pkg    *Pkg
	isym   *Sym
	tsym   *Sym
	type_  *Type
	mtype  *Type
	offset int32
}

// byMethodNameAndPackagePath sorts method signatures by name, then package path.
type byMethodNameAndPackagePath []*Sig

func (x byMethodNameAndPackagePath) Len() int      { return len(x) }
func (x byMethodNameAndPackagePath) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byMethodNameAndPackagePath) Less(i, j int) bool {
	return siglt(x[i], x[j])
}

// siglt reports whether a < b: ordered by name first, then by
// package path, with a nil package (exported name) sorting first.
func siglt(a, b *Sig) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.pkg == b.pkg {
		return false
	}
	if a.pkg == nil {
		return true
	}
	if b.pkg == nil {
		return false
	}
	return a.pkg.Path < b.pkg.Path
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
const (
	BUCKETSIZE = 8   // entries per bucket
	MAXKEYSIZE = 128 // keys wider than this are stored indirectly (as pointers)
	MAXVALSIZE = 128 // values wider than this are stored indirectly (as pointers)
)

func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
func imethodSize() int     { return 4 + 4 }        // Sizeof(runtime.imethod{})

// uncommonSize returns Sizeof(runtime.uncommontype{}) for t,
// or 0 when t has neither a name nor methods and so carries
// no uncommon section at all.
func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{})
	if t.Sym == nil && len(methods(t)) == 0 {
		return 0
	}
	return 4 + 2 + 2
}

// makefield returns a new struct Field of type t whose symbol is
// name looked up in nopkg (the field is not user-visible).
func makefield(name string, t *Type) *Field {
	f := newField()
	f.Type = t
	f.Sym = nopkg.Lookup(name)
	return f
}

// mapbucket builds (and caches on t.MapType().Bucket) the internal
// bucket struct type for map type t. The layout must mirror the
// runtime's bucket: topbits, keys, values, optional pad, overflow.
func mapbucket(t *Type) *Type {
	if t.MapType().Bucket != nil {
		return t.MapType().Bucket
	}

	bucket := typ(TSTRUCT)
	keytype := t.Key()
	valtype := t.Val()
	dowidth(keytype)
	dowidth(valtype)
	// Large keys/values are stored in the bucket by pointer.
	if keytype.Width > MAXKEYSIZE {
		keytype = Ptrto(keytype)
	}
	if valtype.Width > MAXVALSIZE {
		valtype = Ptrto(valtype)
	}

	field := make([]*Field, 0, 5)

	// The first field is: uint8 topbits[BUCKETSIZE].
	arr := typArray(Types[TUINT8], BUCKETSIZE)
	field = append(field, makefield("topbits", arr))

	arr = typArray(keytype, BUCKETSIZE)
	arr.Noalg = true
	field = append(field, makefield("keys", arr))

	arr = typArray(valtype, BUCKETSIZE)
	arr.Noalg = true
	field = append(field, makefield("values", arr))

	// Make sure the overflow pointer is the last memory in the struct,
	// because the runtime assumes it can use size-ptrSize as the
	// offset of the overflow pointer. We double-check that property
	// below once the offsets and size are computed.
	//
	// BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
	// On 32-bit systems, the max alignment is 32-bit, and the
	// overflow pointer will add another 32-bit field, and the struct
	// will end with no padding.
	// On 64-bit systems, the max alignment is 64-bit, and the
	// overflow pointer will add another 64-bit field, and the struct
	// will end with no padding.
	// On nacl/amd64p32, however, the max alignment is 64-bit,
	// but the overflow pointer will add only a 32-bit field,
	// so if the struct needs 64-bit padding (because a key or value does)
	// then it would end with an extra 32-bit padding field.
	// Preempt that by emitting the padding here.
	if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr {
		field = append(field, makefield("pad", Types[TUINTPTR]))
	}

	// If keys and values have no pointers, the map implementation
	// can keep a list of overflow pointers on the side so that
	// buckets can be marked as having no pointers.
	// Arrange for the bucket to have no pointers by changing
	// the type of the overflow field to uintptr in this case.
	// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
	otyp := Ptrto(bucket)
	if !haspointers(t.Val()) && !haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
		otyp = Types[TUINTPTR]
	}
	ovf := makefield("overflow", otyp)
	field = append(field, ovf)

	// link up fields
	bucket.Noalg = true
	bucket.Local = t.Local
	bucket.SetFields(field[:])
	dowidth(bucket)

	// Double-check that overflow field is final memory in struct,
	// with no padding at end. See comment above.
	if ovf.Offset != bucket.Width-int64(Widthptr) {
		Yyerror("bad math in mapbucket for %v", t)
	}

	t.MapType().Bucket = bucket

	bucket.StructType().Map = t
	return bucket
}

// Builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
func hmap(t *Type) *Type {
	// Return the cached header type if it was already built.
	if t.MapType().Hmap != nil {
		return t.MapType().Hmap
	}

	bucket := mapbucket(t)
	var field [8]*Field
	field[0] = makefield("count", Types[TINT])
	field[1] = makefield("flags", Types[TUINT8])
	field[2] = makefield("B", Types[TUINT8])
	field[3] = makefield("hash0", Types[TUINT32])
	field[4] = makefield("buckets", Ptrto(bucket))
	field[5] = makefield("oldbuckets", Ptrto(bucket))
	field[6] = makefield("nevacuate", Types[TUINTPTR])
	field[7] = makefield("overflow", Types[TUNSAFEPTR])

	h := typ(TSTRUCT)
	h.Noalg = true
	h.Local = t.Local
	h.SetFields(field[:])
	dowidth(h)
	t.MapType().Hmap = h
	h.StructType().Map = t
	return h
}

// hiter builds (and caches on t.MapType().Hiter) the struct type used
// to hold map-iteration state for map type t.
func hiter(t *Type) *Type {
	if t.MapType().Hiter != nil {
		return t.MapType().Hiter
	}

	// build a struct:
	// hiter {
	//    key *Key
	//    val *Value
	//    t *MapType
	//    h *Hmap
	//    buckets *Bucket
	//    bptr *Bucket
	//    overflow0 unsafe.Pointer
	//    overflow1 unsafe.Pointer
	//    startBucket uintptr
	//    stuff uintptr
	//    bucket uintptr
	//    checkBucket uintptr
	// }
	// must match ../../../../runtime/hashmap.go:hiter.
	var field [12]*Field
	field[0] = makefield("key", Ptrto(t.Key()))
	field[1] = makefield("val", Ptrto(t.Val()))
	field[2] = makefield("t", Ptrto(Types[TUINT8]))
	field[3] = makefield("h", Ptrto(hmap(t)))
	field[4] = makefield("buckets", Ptrto(mapbucket(t)))
	field[5] = makefield("bptr", Ptrto(mapbucket(t)))
	field[6] = makefield("overflow0", Types[TUNSAFEPTR])
	field[7] = makefield("overflow1", Types[TUNSAFEPTR])
	field[8] = makefield("startBucket", Types[TUINTPTR])
	field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
	field[10] = makefield("bucket", Types[TUINTPTR])
	field[11] = makefield("checkBucket", Types[TUINTPTR])

	// build iterator struct holding the above fields
	i := typ(TSTRUCT)
	i.Noalg = true
	i.SetFields(field[:])
	dowidth(i)
	// The runtime assumes the iterator is exactly 12 words.
	if i.Width != int64(12*Widthptr) {
		Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
	}
	t.MapType().Hiter = i
	i.StructType().Map = t
	return i
}

// f is method type, with receiver.
// return function type, receiver as first argument (or not).
func methodfunc(f *Type, receiver *Type) *Type {
	var in []*Node
	if receiver != nil {
		d := Nod(ODCLFIELD, nil, nil)
		d.Type = receiver
		in = append(in, d)
	}

	var d *Node
	for _, t := range f.Params().Fields().Slice() {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		d.Isddd = t.Isddd // preserve variadic-ness of the final parameter
		in = append(in, d)
	}

	var out []*Node
	for _, t := range f.Results().Fields().Slice() {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		out = append(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname() != nil {
		// Link to name of original method function.
		t.SetNname(f.Nname())
	}

	return t
}

// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *Type) []*Sig {
	// method type
	mt := methtype(t, 0)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = Ptrto(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var ms []*Sig
	for _, f := range mt.AllMethods().Slice() {
		if f.Type.Etype != TFUNC || f.Type.Recv() == nil {
			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Type.Recv() == nil {
			// NOTE(review): unreachable — the condition above already
			// rejects a nil receiver. Kept as a belt-and-suspenders check.
			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Nointerface {
			continue
		}

		method := f.Sym
		if method == nil {
			continue
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		this := f.Type.Recv().Type

		if this.IsPtr() && this.Elem() == t {
			continue
		}
		if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) {
			continue
		}

		var sig Sig
		ms = append(ms, &sig)

		sig.name = method.Name
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatalf("methods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.isym = methodsym(method, it, 1)
		sig.tsym = methodsym(method, t, 0)
		sig.type_ = methodfunc(f.Type, t)
		sig.mtype = methodfunc(f.Type, nil)

		// Generate the interface-word wrapper once per symbol.
		if sig.isym.Flags&SymSiggen == 0 {
			sig.isym.Flags |= SymSiggen
			if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
				compiling_wrappers = 1
				genwrapper(it, f, sig.isym, 1)
				compiling_wrappers = 0
			}
		}

		// Generate the value-receiver wrapper once per symbol.
		if sig.tsym.Flags&SymSiggen == 0 {
			sig.tsym.Flags |= SymSiggen
			if !Eqtype(this, t) {
				compiling_wrappers = 1
				genwrapper(t, f, sig.tsym, 0)
				compiling_wrappers = 0
			}
		}
	}

	sort.Sort(byMethodNameAndPackagePath(ms))
	return ms
}

// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *Type) []*Sig {
	var methods []*Sig
	for _, f := range t.Fields().Slice() {
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		method := f.Sym
		var sig = Sig{
			name: method.Name,
		}
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatalf("imethods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.mtype = f.Type
		sig.offset = 0
		sig.type_ = methodfunc(f.Type, nil)

		// Interface fields are expected to arrive already sorted;
		// verify the invariant rather than re-sorting.
		if n := len(methods); n > 0 {
			last := methods[n-1]
			if !(siglt(last, &sig)) {
				Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name)
			}
		}
		methods = append(methods, &sig)

		// Compiler can only refer to wrappers for non-blank methods.
		if isblanksym(method) {
			continue
		}

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym := methodsym(method, t, 0)

		if isym.Flags&SymSiggen == 0 {
			isym.Flags |= SymSiggen
			genwrapper(t, f, isym, 0)
		}
	}

	return methods
}

// dimportpath emits (at most once per package) the
// type..importpath symbol holding p's import path encoded
// as reflect.name data, and records it in p.Pathsym.
func dimportpath(p *Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	var str string
	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		str = myimportpath
	} else {
		str = p.Path
	}

	s := obj.Linklookup(Ctxt, "type..importpath."+p.Prefix+".", 0)
	ot := dnameData(s, 0, str, "", nil, false)
	ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA)
	p.Pathsym = s
}

// dgopkgpath writes a reference to pkg's import-path symbol into s at ot.
func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
	return dgopkgpathLSym(Linksym(s), ot, pkg)
}

// dgopkgpathLSym writes a pointer-sized reference to pkg's import-path
// symbol into s at ot; a nil pkg is written as a zero word.
func dgopkgpathLSym(s *obj.LSym, ot int, pkg *Pkg) int {
	if pkg == nil {
		return duintxxLSym(s, ot, 0, Widthptr)
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := obj.Linklookup(Ctxt, `type..importpath."".`, 0)
		return dsymptrLSym(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptrLSym(s, ot, pkg.Pathsym, 0)
}

// dgopkgpathOffLSym writes an offset relocation in s at offset ot to the pkg path symbol.
func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *Pkg) int {
	if pkg == nil {
		return duintxxLSym(s, ot, 0, 4)
	}
	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := obj.Linklookup(Ctxt, `type..importpath."".`, 0)
		return dsymptrOffLSym(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptrOffLSym(s, ot, pkg.Pathsym, 0)
}

// isExportedField reports whether a struct field is exported.
func isExportedField(ft *Field) bool {
	if ft.Sym != nil && ft.Embedded == 0 {
		// Ordinary named field: exported iff its own name is.
		return exportname(ft.Sym.Name)
	} else {
		// Embedded field: unexported if its type's name is unexported
		// or the type comes from the builtin package.
		if ft.Type.Sym != nil &&
			(ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) {
			return false
		} else {
			return true
		}
	}
}

// dnameField dumps a reflect.name for a struct field.
func dnameField(s *Sym, ot int, ft *Field) int {
	var name string
	if ft.Sym != nil && ft.Embedded == 0 {
		name = ft.Sym.Name
	}
	nsym := dname(name, ft.Note, nil, isExportedField(ft))
	return dsymptrLSym(Linksym(s), ot, nsym, 0)
}

// dnameData writes the contents of a reflect.name into s at offset ot.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *Pkg, exported bool) int {
	// Lengths are stored in two bytes each; enforce the 16-bit limit.
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	}
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)
	}

	// Encode name and tag. See reflect/type.go for details.
	var bits byte
	l := 1 + 2 + len(name)
	if exported {
		bits |= 1 << 0
	}
	if len(tag) > 0 {
		l += 2 + len(tag)
		bits |= 1 << 1
	}
	if pkg != nil {
		bits |= 1 << 2
	}
	b := make([]byte, l)
	b[0] = bits
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	copy(b[3:], name)
	if len(tag) > 0 {
		tb := b[3+len(name):]
		tb[0] = uint8(len(tag) >> 8)
		tb[1] = uint8(len(tag))
		copy(tb[2:], tag)
	}

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))

	if pkg != nil {
		// The package path is appended as a 4-byte offset reference.
		ot = dgopkgpathOffLSym(s, ot, pkg)
	}

	return ot
}

// dnameCount numbers the per-compilation (non-shared) name symbols
// produced by dname so each gets a unique symbol name.
var dnameCount int

// dname creates a reflect.name for a struct field or method.
func dname(name, tag string, pkg *Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	if pkg == nil {
		// In the common case, share data with other packages.
		if name == "" {
			if exported {
				sname += "-noname-exported." + tag
			} else {
				sname += "-noname-unexported." + tag
			}
		} else {
			sname += name + "." + tag
		}
	} else {
		// A name that references a package path cannot be shared
		// across packages; give it a unique numbered symbol name.
		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
		dnameCount++
	}
	s := obj.Linklookup(Ctxt, sname, 0)
	if len(s.P) > 0 {
		// Symbol data already written (shared with an earlier use).
		return s
	}
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(s *Sym, ot int, t *Type, dataAdd int) int {
	m := methods(t)
	// No uncommon section for unnamed, methodless types.
	if t.Sym == nil && len(m) == 0 {
		return ot
	}
	// The uncommon section must start pointer-aligned.
	noff := int(Rnd(int64(ot), int64(Widthptr)))
	if noff != ot {
		Fatalf("unexpected alignment in dextratype for %s", t)
	}

	// Ensure the descriptor of each method's type is emitted.
	for _, a := range m {
		dtypesym(a.type_)
	}

	ot = dgopkgpathOffLSym(Linksym(s), ot, typePkg(t))

	dataAdd += 4 + 2 + 2 // skip the uncommontype header itself
	mcount := len(m)
	// Both counts are stored as uint16; make sure they fit.
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %s: %d", t, mcount)
	}
	if dataAdd != int(uint16(dataAdd)) {
		Fatalf("methods are too far away on %s: %d", t, dataAdd)
	}

	ot = duint16(s, ot, uint16(mcount))
	ot = duint16(s, ot, uint16(dataAdd))
	return ot
}

// typePkg returns the package to record for type t's descriptor,
// or nil when no package reference is needed (builtin or unnamed types).
func typePkg(t *Type) *Pkg {
	tsym := t.Sym
	if tsym == nil {
		// For unnamed composites, fall back to the element type's symbol.
		switch t.Etype {
		case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
			if t.Elem() != nil {
				tsym = t.Elem().Sym
			}
		}
	}
	if tsym != nil && t != Types[t.Etype] && t != errortype {
		return tsym.Pkg
	}
	return nil
}

// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(s *Sym, ot int, t *Type) int {
	lsym := Linksym(s)
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := exportname(a.name)
		var pkg *Pkg
		if !exported && a.pkg != typePkg(t) {
			pkg = a.pkg
		}
		nsym := dname(a.name, "", pkg, exported)

		ot = dsymptrOffLSym(lsym, ot, nsym, 0)
		ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype)))
		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym))
		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym))
	}
	return ot
}

// dmethodptrOffLSym writes a 4-byte field in s at offset ot with an
// R_METHODOFF relocation to x, which lets the linker discard methods
// that are never referenced.
func dmethodptrOffLSym(s *obj.LSym, ot int, x *obj.LSym) int {
	duintxxLSym(s, ot, 0, 4)
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = obj.R_METHODOFF
	return ot + 4
}

// kinds maps compiler etypes to the runtime/reflect kind constants.
var kinds = []int{
	TINT:        obj.KindInt,
	TUINT:       obj.KindUint,
	TINT8:       obj.KindInt8,
	TUINT8:      obj.KindUint8,
	TINT16:      obj.KindInt16,
	TUINT16:     obj.KindUint16,
	TINT32:      obj.KindInt32,
	TUINT32:     obj.KindUint32,
	TINT64:      obj.KindInt64,
	TUINT64:     obj.KindUint64,
	TUINTPTR:    obj.KindUintptr,
	TFLOAT32:    obj.KindFloat32,
	TFLOAT64:    obj.KindFloat64,
	TBOOL:       obj.KindBool,
	TSTRING:     obj.KindString,
	TPTR32:      obj.KindPtr,
	TPTR64:      obj.KindPtr,
	TSTRUCT:     obj.KindStruct,
	TINTER:      obj.KindInterface,
	TCHAN:       obj.KindChan,
	TMAP:        obj.KindMap,
	TARRAY:      obj.KindArray,
	TSLICE:      obj.KindSlice,
	TFUNC:       obj.KindFunc,
	TCOMPLEX64:  obj.KindComplex64,
	TCOMPLEX128: obj.KindComplex128,
	TUNSAFEPTR:  obj.KindUnsafePointer,
}

// haspointers reports whether values of type t contain pointers.
// Results for arrays and structs are cached on the type
// (Haspointers: 0 unknown, 1 no, 2 yes).
func haspointers(t *Type) bool {
	switch t.Etype {
	case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
		TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL:
		return false

	case TSLICE:
		return true

	case TARRAY:
		at := t.Extra.(*ArrayType)
		if at.Haspointers != 0 {
			// Cached answer: decode 1/2 back to false/true.
			return at.Haspointers-1 != 0
		}

		ret := false
		if t.NumElem() != 0 { // non-empty array
			ret = haspointers(t.Elem())
		}

		at.Haspointers = 1 + uint8(obj.Bool2int(ret))
		return ret

	case TSTRUCT:
		st := t.StructType()
		if st.Haspointers != 0 {
			return st.Haspointers-1 != 0
		}

		ret := false
		for _, t1 := range t.Fields().Slice() {
			if haspointers(t1.Type) {
				ret = true
				break
			}
		}
		st.Haspointers = 1 + uint8(obj.Bool2int(ret))
		return ret
	}

	// Everything else (pointers, strings, maps, chans, funcs,
	// interfaces, unsafe pointers) contains pointers.
	return true
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *Type) int64 {
	if !haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TSLICE:
		// struct { byte *array; uintgo len; uintgo cap; }
		return int64(Widthptr)

	case TARRAY:
		// haspointers already eliminated t.NumElem() == 0.
		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	case TSTRUCT:
		// Find the last field that has pointers.
		var lastPtrField *Field
		for _, t1 := range t.Fields().Slice() {
			if haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0 // type has an uncommon (methods/name) section
	tflagExtraStar = 1 << 1 // stored name string has a leading "*" to strip
)

// dcommontype_algarray caches the runtime.algarray symbol.
var dcommontype_algarray *Sym

// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(s *Sym, ot int, t *Type) int {
	// The common header must be the first thing written into s.
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if dcommontype_algarray == nil {
		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *Sym
	if alg == ASPECIAL || alg == AMEM {
		algsym = dalgsym(t)
	}

	// Record (via a zero-size R_USETYPE relocation) that emitting T
	// should also pull in *T's descriptor.
	tptr := Ptrto(t)
	if !t.IsPtr() && (t.Sym != nil || methods(tptr) != nil) {
		sptr := dtypesym(tptr)
		r := obj.Addrel(Linksym(s))
		r.Off = 0
		r.Siz = 0
		r.Sym = sptr.Lsym
		r.Type = obj.R_USETYPE
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	//	type rtype struct {
	//		size          uintptr
	//		ptrdata       uintptr
	//		hash          uint32
	//		tflag         tflag
	//		align         uint8
	//		fieldAlign    uint8
	//		kind          uint8
	//		alg           *typeAlg
	//		gcdata        *byte
	//		str           nameOff
	//		_             int32
	//	}
	ot = duintptr(s, ot, uint64(t.Width))
	ot = duintptr(s, ot, uint64(ptrdata))

	ot = duint32(s, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}

	exported := false
	p := Tconv(t, FmtLeft|FmtUnsigned)
	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		tflag |= tflagExtraStar
		if t.Sym != nil {
			exported = exportname(t.Sym.Name)
		}
	} else {
		if t.Elem() != nil && t.Elem().Sym != nil {
			exported = exportname(t.Elem().Sym.Name)
		}
	}

	ot = duint8(s, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(s, ot, t.Align) // align
	ot = duint8(s, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if !haspointers(t) {
		i |= obj.KindNoPointers
	}
	if isdirectiface(t) {
		i |= obj.KindDirectIface
	}
	if useGCProg {
		i |= obj.KindGCProg
	}
	ot = duint8(s, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(s, ot, dcommontype_algarray, int(alg)*sizeofAlg)
	} else {
		ot = dsymptr(s, ot, algsym, 0)
	}
	ot = dsymptr(s, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOffLSym(Linksym(s), ot, nsym, 0)
	ot = duint32(s, ot, 0) // trailing padding int32

	return ot
}

// typesym returns the symbol naming t's runtime type descriptor.
func typesym(t *Type) *Sym {
	return Pkglookup(Tconv(t, FmtLeft), typepkg)
}

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *Type, f *Field) *Sym {
	return Pkglookup(Tconv(t, FmtLeft)+"."+f.Sym.Name, trackpkg)
}

// typelinkLSym returns the typelink symbol for type t.
func typelinkLSym(t *Type) *obj.LSym {
	name := "go.typelink." + Tconv(t, FmtLeft) // complete, unambiguous type name
	return obj.Linklookup(Ctxt, name, 0)
}

// typesymprefix returns the symbol "prefix.<type name>" in the type package.
func typesymprefix(prefix string, t *Type) *Sym {
	p := prefix + "." + Tconv(t, FmtLeft)
	s := Pkglookup(p, typepkg)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym returns the symbol for t's type descriptor, creating its
// definition node and queueing t for descriptor emission on first use.
func typenamesym(t *Type) *Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typename %v", t)
	}
	s := typesym(t)
	if s.Def == nil {
		n := newname(s)
		n.Type = Types[TUINT8]
		n.Class = PEXTERN
		n.Typecheck = 1
		s.Def = n

		signatlist = append(signatlist, typenod(t))
	}

	return s.Def.Sym
}

// typename returns an expression node referencing the address of
// t's runtime type descriptor.
func typename(t *Type) *Node {
	s := typenamesym(t)
	n := Nod(OADDR, s.Def, nil)
	n.Type = Ptrto(s.Def.Type)
	n.Addable = true
	n.Ullman = 2
	n.Typecheck = 1
	return n
}

// itabname returns an expression node referencing the address of the
// itab for concrete type t and interface type itype, queueing the itab
// for emission on first use.
func itabname(t, itype *Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("itabname %v", t)
	}
	s := Pkglookup(Tconv(t, FmtLeft)+","+Tconv(itype, FmtLeft), itabpkg)
	if s.Def == nil {
		n := newname(s)
		n.Type = Types[TUINT8]
		n.Class = PEXTERN
		n.Typecheck = 1
		s.Def = n

		itabs = append(itabs, itabEntry{t: t, itype: itype, sym: s})
	}

	n := Nod(OADDR, s.Def, nil)
	n.Type = Ptrto(s.Def.Type)
	n.Addable = true
	n.Ullman = 2
	n.Typecheck = 1
	return n
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
func isreflexive(t *Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TSTRING,
		TCHAN:
		return true

	// Floats (and anything containing them) are not reflexive
	// because NaN != NaN.
	case TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER:
		return false

	case TARRAY:
		return isreflexive(t.Elem())

	case TSTRUCT:
		// Reflexive only if every field is.
		for _, t1 := range t.Fields().Slice() {
			if !isreflexive(t1.Type) {
				return false
			}
		}
		return true

	default:
		Fatalf("bad type for map key: %v", t)
		return false
	}
}

// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TCHAN:
		return false

	case TFLOAT32, // floats can be +0/-0
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER,
		TSTRING: // strings might have smaller backing stores
		return true

	case TARRAY:
		return needkeyupdate(t.Elem())

	case TSTRUCT:
		// A key update is needed if any field needs one.
		for _, t1 := range t.Fields().Slice() {
			if needkeyupdate(t1.Type) {
				return true
			}
		}
		return false

	default:
		Fatalf("bad type for map key: %v", t)
		return true
	}
}

// dtypesym emits the runtime type descriptor for t and returns its symbol.
func dtypesym(t *Type) *Sym {
	// Replace byte, rune aliases with real type.
	// They've been separate internally to make error messages
	// better, but we have to merge them in the reflect tables.
1089 if t == bytetype || t == runetype { 1090 t = Types[t.Etype] 1091 } 1092 1093 if t.IsUntyped() { 1094 Fatalf("dtypesym %v", t) 1095 } 1096 1097 s := typesym(t) 1098 if s.Flags&SymSiggen != 0 { 1099 return s 1100 } 1101 s.Flags |= SymSiggen 1102 1103 // special case (look for runtime below): 1104 // when compiling package runtime, 1105 // emit the type structures for int, float, etc. 1106 tbase := t 1107 1108 if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil { 1109 tbase = t.Elem() 1110 } 1111 dupok := 0 1112 if tbase.Sym == nil { 1113 dupok = obj.DUPOK 1114 } 1115 1116 if myimportpath == "runtime" && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc 1117 goto ok 1118 } 1119 1120 // named types from other files are defined only by those files 1121 if tbase.Sym != nil && !tbase.Local { 1122 return s 1123 } 1124 if isforw[tbase.Etype] { 1125 return s 1126 } 1127 1128 ok: 1129 ot := 0 1130 switch t.Etype { 1131 default: 1132 ot = dcommontype(s, ot, t) 1133 ot = dextratype(s, ot, t, 0) 1134 1135 case TARRAY: 1136 // ../../../../runtime/type.go:/arrayType 1137 s1 := dtypesym(t.Elem()) 1138 t2 := typSlice(t.Elem()) 1139 s2 := dtypesym(t2) 1140 ot = dcommontype(s, ot, t) 1141 ot = dsymptr(s, ot, s1, 0) 1142 ot = dsymptr(s, ot, s2, 0) 1143 ot = duintptr(s, ot, uint64(t.NumElem())) 1144 ot = dextratype(s, ot, t, 0) 1145 1146 case TSLICE: 1147 // ../../../../runtime/type.go:/sliceType 1148 s1 := dtypesym(t.Elem()) 1149 ot = dcommontype(s, ot, t) 1150 ot = dsymptr(s, ot, s1, 0) 1151 ot = dextratype(s, ot, t, 0) 1152 1153 case TCHAN: 1154 // ../../../../runtime/type.go:/chanType 1155 s1 := dtypesym(t.Elem()) 1156 ot = dcommontype(s, ot, t) 1157 ot = dsymptr(s, ot, s1, 0) 1158 ot = duintptr(s, ot, uint64(t.ChanDir())) 1159 ot = dextratype(s, ot, t, 0) 1160 1161 case TFUNC: 1162 for _, t1 := range t.Recvs().Fields().Slice() { 1163 dtypesym(t1.Type) 1164 } 1165 isddd := false 1166 for _, t1 := range 
t.Params().Fields().Slice() { 1167 isddd = t1.Isddd 1168 dtypesym(t1.Type) 1169 } 1170 for _, t1 := range t.Results().Fields().Slice() { 1171 dtypesym(t1.Type) 1172 } 1173 1174 ot = dcommontype(s, ot, t) 1175 inCount := t.Recvs().NumFields() + t.Params().NumFields() 1176 outCount := t.Results().NumFields() 1177 if isddd { 1178 outCount |= 1 << 15 1179 } 1180 ot = duint16(s, ot, uint16(inCount)) 1181 ot = duint16(s, ot, uint16(outCount)) 1182 if Widthptr == 8 { 1183 ot += 4 // align for *rtype 1184 } 1185 1186 dataAdd := (inCount + t.Results().NumFields()) * Widthptr 1187 ot = dextratype(s, ot, t, dataAdd) 1188 1189 // Array of rtype pointers follows funcType. 1190 for _, t1 := range t.Recvs().Fields().Slice() { 1191 ot = dsymptr(s, ot, dtypesym(t1.Type), 0) 1192 } 1193 for _, t1 := range t.Params().Fields().Slice() { 1194 ot = dsymptr(s, ot, dtypesym(t1.Type), 0) 1195 } 1196 for _, t1 := range t.Results().Fields().Slice() { 1197 ot = dsymptr(s, ot, dtypesym(t1.Type), 0) 1198 } 1199 1200 case TINTER: 1201 m := imethods(t) 1202 n := len(m) 1203 for _, a := range m { 1204 dtypesym(a.type_) 1205 } 1206 1207 // ../../../../runtime/type.go:/interfaceType 1208 ot = dcommontype(s, ot, t) 1209 1210 var tpkg *Pkg 1211 if t.Sym != nil && t != Types[t.Etype] && t != errortype { 1212 tpkg = t.Sym.Pkg 1213 } 1214 ot = dgopkgpath(s, ot, tpkg) 1215 1216 ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t)) 1217 ot = duintxx(s, ot, uint64(n), Widthint) 1218 ot = duintxx(s, ot, uint64(n), Widthint) 1219 dataAdd := imethodSize() * n 1220 ot = dextratype(s, ot, t, dataAdd) 1221 1222 lsym := Linksym(s) 1223 for _, a := range m { 1224 // ../../../../runtime/type.go:/imethod 1225 exported := exportname(a.name) 1226 var pkg *Pkg 1227 if !exported && a.pkg != tpkg { 1228 pkg = a.pkg 1229 } 1230 nsym := dname(a.name, "", pkg, exported) 1231 1232 ot = dsymptrOffLSym(lsym, ot, nsym, 0) 1233 ot = dsymptrOffLSym(lsym, ot, Linksym(dtypesym(a.type_)), 0) 1234 } 1235 1236 // 
../../../../runtime/type.go:/mapType 1237 case TMAP: 1238 s1 := dtypesym(t.Key()) 1239 s2 := dtypesym(t.Val()) 1240 s3 := dtypesym(mapbucket(t)) 1241 s4 := dtypesym(hmap(t)) 1242 ot = dcommontype(s, ot, t) 1243 ot = dsymptr(s, ot, s1, 0) 1244 ot = dsymptr(s, ot, s2, 0) 1245 ot = dsymptr(s, ot, s3, 0) 1246 ot = dsymptr(s, ot, s4, 0) 1247 if t.Key().Width > MAXKEYSIZE { 1248 ot = duint8(s, ot, uint8(Widthptr)) 1249 ot = duint8(s, ot, 1) // indirect 1250 } else { 1251 ot = duint8(s, ot, uint8(t.Key().Width)) 1252 ot = duint8(s, ot, 0) // not indirect 1253 } 1254 1255 if t.Val().Width > MAXVALSIZE { 1256 ot = duint8(s, ot, uint8(Widthptr)) 1257 ot = duint8(s, ot, 1) // indirect 1258 } else { 1259 ot = duint8(s, ot, uint8(t.Val().Width)) 1260 ot = duint8(s, ot, 0) // not indirect 1261 } 1262 1263 ot = duint16(s, ot, uint16(mapbucket(t).Width)) 1264 ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Key())))) 1265 ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Key())))) 1266 ot = dextratype(s, ot, t, 0) 1267 1268 case TPTR32, TPTR64: 1269 if t.Elem().Etype == TANY { 1270 // ../../../../runtime/type.go:/UnsafePointerType 1271 ot = dcommontype(s, ot, t) 1272 ot = dextratype(s, ot, t, 0) 1273 1274 break 1275 } 1276 1277 // ../../../../runtime/type.go:/ptrType 1278 s1 := dtypesym(t.Elem()) 1279 1280 ot = dcommontype(s, ot, t) 1281 ot = dsymptr(s, ot, s1, 0) 1282 ot = dextratype(s, ot, t, 0) 1283 1284 // ../../../../runtime/type.go:/structType 1285 // for security, only the exported fields. 
1286 case TSTRUCT: 1287 n := 0 1288 1289 for _, t1 := range t.Fields().Slice() { 1290 dtypesym(t1.Type) 1291 n++ 1292 } 1293 1294 ot = dcommontype(s, ot, t) 1295 pkg := localpkg 1296 if t.Sym != nil { 1297 pkg = t.Sym.Pkg 1298 } 1299 ot = dgopkgpath(s, ot, pkg) 1300 ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t)) 1301 ot = duintxx(s, ot, uint64(n), Widthint) 1302 ot = duintxx(s, ot, uint64(n), Widthint) 1303 1304 dataAdd := n * structfieldSize() 1305 ot = dextratype(s, ot, t, dataAdd) 1306 1307 for _, f := range t.Fields().Slice() { 1308 // ../../../../runtime/type.go:/structField 1309 ot = dnameField(s, ot, f) 1310 ot = dsymptr(s, ot, dtypesym(f.Type), 0) 1311 ot = duintptr(s, ot, uint64(f.Offset)) 1312 } 1313 } 1314 1315 ot = dextratypeData(s, ot, t) 1316 ggloblsym(s, int32(ot), int16(dupok|obj.RODATA)) 1317 1318 // generate typelink.foo pointing at s = type.foo. 1319 // 1320 // The linker will leave a table of all the typelinks for 1321 // types in the binary, so the runtime can find them. 1322 // 1323 // When buildmode=shared, all types are in typelinks so the 1324 // runtime can deduplicate type pointers. 1325 keep := Ctxt.Flag_dynlink 1326 if !keep && t.Sym == nil { 1327 // For an unnamed type, we only need the link if the type can 1328 // be created at run time by reflect.PtrTo and similar 1329 // functions. If the type exists in the program, those 1330 // functions must return the existing type structure rather 1331 // than creating a new one. 
		switch t.Etype {
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
			keep = true
		}
	}
	if keep {
		slink := typelinkLSym(t)
		dsymptrOffLSym(slink, 0, Linksym(s), 0)
		ggloblLSym(slink, 4, int16(dupok|obj.RODATA))
	}

	return s
}

// dumptypestructs emits the runtime type descriptors for every type on
// signatlist (including types added to the list while it is being walked),
// the itab and itablink symbols collected in itabs, import path strings for
// directly imported packages, and — when compiling package runtime — the
// descriptors for the predeclared basic types.
func dumptypestructs() {
	// copy types from externdcl list to signatlist
	for _, n := range externdcl {
		if n.Op != OTYPE {
			continue
		}
		signatlist = append(signatlist, n)
	}

	// Process signatlist. This can't use range, as entries are
	// added to the list while it is being processed.
	for i := 0; i < len(signatlist); i++ {
		n := signatlist[i]
		if n.Op != OTYPE {
			continue
		}
		t := n.Type
		dtypesym(t)
		// Named types also get a descriptor for their pointer type.
		if t.Sym != nil {
			dtypesym(Ptrto(t))
		}
	}

	// process itabs
	for _, i := range itabs {
		// dump empty itab symbol into i.sym
		// type itab struct {
		//   inter  *interfacetype
		//   _type  *_type
		//   link   *itab
		//   bad    int32
		//   unused int32
		//   fun    [1]uintptr // variable sized
		// }
		o := dsymptr(i.sym, 0, dtypesym(i.itype), 0)
		o = dsymptr(i.sym, o, dtypesym(i.t), 0)
		o += Widthptr + 8                      // skip link/bad/unused fields
		o += len(imethods(i.itype)) * Widthptr // skip fun method pointers
		// at runtime the itab will contain pointers to types, other itabs and
		// method functions. None are allocated on heap, so we can use obj.NOPTR.
		ggloblsym(i.sym, int32(o), int16(obj.DUPOK|obj.NOPTR))

		// Emit an itablink entry ("concrete,interface") pointing at the itab
		// so the linker/runtime can enumerate all itabs.
		ilink := Pkglookup(Tconv(i.t, FmtLeft)+","+Tconv(i.itype, FmtLeft), itablinkpkg)
		dsymptr(ilink, 0, i.sym, 0)
		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
	}

	// generate import strings for imported packages
	if forceObjFileStability {
		// Sorting the packages is not necessary but to compare binaries created
		// using textual and binary format we sort by path to reduce differences.
		sort.Sort(pkgByPath(pkgs))
	}
	for _, p := range pkgs {
		if p.Direct {
			dimportpath(p)
		}
	}

	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in .6 files.
	if myimportpath == "runtime" {
		for i := EType(1); i <= TBOOL; i++ {
			dtypesym(Ptrto(Types[i]))
		}
		dtypesym(Ptrto(Types[TSTRING]))
		dtypesym(Ptrto(Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(Ptrto(errortype))

		dtypesym(functype(nil, []*Node{Nod(ODCLFIELD, nil, typenod(errortype))}, []*Node{Nod(ODCLFIELD, nil, typenod(Types[TSTRING]))}))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race {
			dimportpath(racepkg)
		}
		if flag_msan {
			dimportpath(msanpkg)
		}
		dimportpath(mkpkg("main"))
	}
}

// pkgByPath implements sort.Interface, ordering packages by import path.
type pkgByPath []*Pkg

func (a pkgByPath) Len() int           { return len(a) }
func (a pkgByPath) Less(i, j int) bool { return a[i].Path < a[j].Path }
func (a pkgByPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// dalgsym emits and returns the symbol for t's algorithm table, which holds
// pointers to the hash and equality closures the runtime uses for t.
// AMEM types of a given size all share one table built on the runtime's
// memhash_varlen/memequal_varlen helpers; every other comparable type gets
// per-type generated hash and equality functions.
func dalgsym(t *Type) *Sym {
	var s *Sym
	var hashfunc *Sym
	var eqfunc *Sym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s = Pkglookup(p, typepkg)

		if s.Flags&SymAlgGen != 0 {
			// Already generated for some other AMEM type of this size.
			return s
		}
		s.Flags |= SymAlgGen

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = Pkglookup(p, typepkg)

		ot := 0
		ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0)
		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = Pkglookup(p, typepkg)

		ot = 0
		ot = dsymptr(eqfunc, ot, Pkglookup("memequal_varlen", Runtimepkg), 0)
		ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s = typesymprefix(".alg", t)

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t)
		eqfunc = typesymprefix(".eqfunc", t)

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash, 0)

		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq, 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(s, ot, hashfunc, 0)
	ot = dsymptr(s, ot, eqfunc, 0)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// 16 bytes is enough to describe 128 pointer-sized words, 512 or 1024 bytes
// depending on the system. Above this length, the GC information is
// recorded as a GC program, which can express repetition compactly.
// In either form, the information is used by the runtime to initialize the
// heap bitmap, and for large types (like 128 or more words), they are
// roughly the same speed. GC programs are never much larger and often
// more compact. (If large arrays are involved, they can be arbitrarily more
// compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *Type) (sym *Sym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	// Small enough pointer span: emit a plain bitmap instead of a program.
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		sym = dgcptrmask(t)
		return
	}

	useGCProg = true
	sym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
// Masks are content-addressed (named by their hex bytes) and deduplicated via
// the SymUniq flag, so identical masks are emitted once.
func dgcptrmask(t *Type) *Sym {
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Pkglookup(p, Runtimepkg)
	if sym.Flags&SymUniq == 0 {
		sym.Flags |= SymUniq
		for i, x := range ptrmask {
			duint8(sym, i, x)
		}
		ggloblsym(sym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return sym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *Type, ptrmask []byte) {
	// Clear the mask before setting bits.
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !haspointers(t) {
		return
	}

	// Walk the type, recording pointer-word positions in a bit vector.
	vec := bvalloc(8 * int32(len(ptrmask)))
	xoffset := int64(0)
	onebitwalktype1(t, &xoffset, vec)

	// Copy the bit vector into the byte-packed mask.
	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if bvget(vec, int32(i)) == 1 {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *Type) (*Sym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	sym := typesymprefix(".gcprog", t)
	var p GCProg
	p.init(sym)
	p.emit(t, 0)
	// Read the described length (in bytes) before end() finalizes the writer.
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return sym, offset
}

// GCProg accumulates an in-progress GC program. The program bytes are written
// into sym after a 4-byte length prefix, which end backpatches.
type GCProg struct {
	sym    *Sym          // symbol receiving the program bytes
	symoff int           // next write offset within sym
	w      gcprog.Writer // encoder producing the program bytes
}

var Debug_gcprog int // set by -d gcprog

// init prepares p to write a GC program into sym, reserving the length prefix.
func (p *GCProg) init(sym *Sym) {
	p.sym = sym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", sym)
		p.w.Debug(os.Stderr)
	}
}

// writeByte appends one program byte to the symbol; used as the gcprog.Writer sink.
func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.sym, p.symoff, x)
}

// end finishes the program, backpatches the 4-byte length prefix, and emits
// the symbol as read-only data.
func (p *GCProg) end() {
	p.w.End()
	duint32(p.sym, 0, uint32(p.symoff-4))
	ggloblsym(p.sym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.sym)
	}
}

// emit writes the GC program fragment describing type t located at the given
// byte offset within the enclosing object.
func (p *GCProg) emit(t *Type, offset int64) {
	dowidth(t)
	if !haspointers(t) {
		return
	}
	// A pointer-word-sized type with pointers is exactly one pointer.
	if t.Width == int64(Widthptr) {
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		// Only the data pointer (first word) is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		// Both interface words are pointers.
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TSLICE:
		// Only the data pointer (first word) is a pointer.
		p.w.Ptr(offset / int64(Widthptr))

	case TARRAY:
		if t.NumElem() == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.NumElem()
		elem := t.Elem()
		for elem.IsArray() {
			count *= elem.NumElem()
			elem = elem.Elem()
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		// Emit one element, pad to the element boundary, then repeat it.
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for _, t1 := range t.Fields().Slice() {
			p.emit(t1.Type, offset+t1.Offset)
		}
	}
}

// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
1709 func zeroaddr(size int64) *Node { 1710 if size >= 1<<31 { 1711 Fatalf("map value too big %d", size) 1712 } 1713 if zerosize < size { 1714 zerosize = size 1715 } 1716 s := Pkglookup("zero", mappkg) 1717 if s.Def == nil { 1718 x := newname(s) 1719 x.Type = Types[TUINT8] 1720 x.Class = PEXTERN 1721 x.Typecheck = 1 1722 s.Def = x 1723 } 1724 z := Nod(OADDR, s.Def, nil) 1725 z.Type = Ptrto(Types[TUINT8]) 1726 z.Addable = true 1727 z.Typecheck = 1 1728 return z 1729 }