github.com/peggyl/go@v0.0.0-20151008231540-ae315999c2d5/src/cmd/compile/internal/gc/reflect.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package gc 6 7 import ( 8 "cmd/internal/gcprog" 9 "cmd/internal/obj" 10 "fmt" 11 "os" 12 "sort" 13 ) 14 15 /* 16 * runtime interface and reflection data structures 17 */ 18 var signatlist *NodeList 19 20 // byMethodNameAndPackagePath sorts method signatures by name, then package path. 21 type byMethodNameAndPackagePath []*Sig 22 23 func (x byMethodNameAndPackagePath) Len() int { return len(x) } 24 func (x byMethodNameAndPackagePath) Swap(i, j int) { x[i], x[j] = x[j], x[i] } 25 func (x byMethodNameAndPackagePath) Less(i, j int) bool { 26 return siglt(x[i], x[j]) 27 } 28 29 // siglt reports whether a < b 30 func siglt(a, b *Sig) bool { 31 if a.name != b.name { 32 return a.name < b.name 33 } 34 if a.pkg == b.pkg { 35 return false 36 } 37 if a.pkg == nil { 38 return true 39 } 40 if b.pkg == nil { 41 return false 42 } 43 return a.pkg.Path < b.pkg.Path 44 } 45 46 // Builds a type representing a Bucket structure for 47 // the given map type. This type is not visible to users - 48 // we include only enough information to generate a correct GC 49 // program for it. 50 // Make sure this stays in sync with ../../runtime/hashmap.go! 51 const ( 52 BUCKETSIZE = 8 53 MAXKEYSIZE = 128 54 MAXVALSIZE = 128 55 ) 56 57 func makefield(name string, t *Type) *Type { 58 f := typ(TFIELD) 59 f.Type = t 60 f.Sym = new(Sym) 61 f.Sym.Name = name 62 return f 63 } 64 65 func mapbucket(t *Type) *Type { 66 if t.Bucket != nil { 67 return t.Bucket 68 } 69 70 bucket := typ(TSTRUCT) 71 keytype := t.Down 72 valtype := t.Type 73 dowidth(keytype) 74 dowidth(valtype) 75 if keytype.Width > MAXKEYSIZE { 76 keytype = Ptrto(keytype) 77 } 78 if valtype.Width > MAXVALSIZE { 79 valtype = Ptrto(valtype) 80 } 81 82 // The first field is: uint8 topbits[BUCKETSIZE]. 83 arr := typ(TARRAY) 84 85 arr.Type = Types[TUINT8] 86 arr.Bound = BUCKETSIZE 87 field := make([]*Type, 0, 5) 88 field = append(field, makefield("topbits", arr)) 89 arr = typ(TARRAY) 90 arr.Type = keytype 91 arr.Bound = BUCKETSIZE 92 field = append(field, makefield("keys", arr)) 93 arr = typ(TARRAY) 94 arr.Type = valtype 95 arr.Bound = BUCKETSIZE 96 field = append(field, makefield("values", arr)) 97 98 // Make sure the overflow pointer is the last memory in the struct, 99 // because the runtime assumes it can use size-ptrSize as the 100 // offset of the overflow pointer. We double-check that property 101 // below once the offsets and size are computed. 102 // 103 // BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point. 104 // On 32-bit systems, the max alignment is 32-bit, and the 105 // overflow pointer will add another 32-bit field, and the struct 106 // will end with no padding. 107 // On 64-bit systems, the max alignment is 64-bit, and the 108 // overflow pointer will add another 64-bit field, and the struct 109 // will end with no padding. 110 // On nacl/amd64p32, however, the max alignment is 64-bit, 111 // but the overflow pointer will add only a 32-bit field, 112 // so if the struct needs 64-bit padding (because a key or value does) 113 // then it would end with an extra 32-bit padding field. 114 // Preempt that by emitting the padding here. 
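// Illustrative sketch (assuming a 64-bit target, Widthptr == 8): for a
// map[int64]int64 the bucket built here works out to
//
//	topbits  [8]uint8   offset 0
//	keys     [8]int64   offset 8
//	values   [8]int64   offset 72
//	overflow *bucket    offset 136, total size 144
//
// so the overflow pointer is the last word and sits at size-ptrSize,
// which the check after dowidth below verifies.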
115 if int(t.Type.Align) > Widthptr || int(t.Down.Align) > Widthptr { 116 field = append(field, makefield("pad", Types[TUINTPTR])) 117 } 118 119 // If keys and values have no pointers, the map implementation 120 // can keep a list of overflow pointers on the side so that 121 // buckets can be marked as having no pointers. 122 // Arrange for the bucket to have no pointers by changing 123 // the type of the overflow field to uintptr in this case. 124 // See comment on hmap.overflow in ../../../../runtime/hashmap.go. 125 otyp := Ptrto(bucket) 126 if !haspointers(t.Type) && !haspointers(t.Down) && t.Type.Width <= MAXKEYSIZE && t.Down.Width <= MAXVALSIZE { 127 otyp = Types[TUINTPTR] 128 } 129 ovf := makefield("overflow", otyp) 130 field = append(field, ovf) 131 132 // link up fields 133 bucket.Noalg = true 134 bucket.Local = t.Local 135 bucket.Type = field[0] 136 for n := int32(0); n < int32(len(field)-1); n++ { 137 field[n].Down = field[n+1] 138 } 139 field[len(field)-1].Down = nil 140 dowidth(bucket) 141 142 // Double-check that overflow field is final memory in struct, 143 // with no padding at end. See comment above. 144 if ovf.Width != bucket.Width-int64(Widthptr) { 145 Yyerror("bad math in mapbucket for %v", t) 146 } 147 148 t.Bucket = bucket 149 150 bucket.Map = t 151 return bucket 152 } 153 154 // Builds a type representing a Hmap structure for the given map type. 155 // Make sure this stays in sync with ../../runtime/hashmap.go! 156 func hmap(t *Type) *Type { 157 if t.Hmap != nil { 158 return t.Hmap 159 } 160 161 bucket := mapbucket(t) 162 var field [8]*Type 163 field[0] = makefield("count", Types[TINT]) 164 field[1] = makefield("flags", Types[TUINT8]) 165 field[2] = makefield("B", Types[TUINT8]) 166 field[3] = makefield("hash0", Types[TUINT32]) 167 field[4] = makefield("buckets", Ptrto(bucket)) 168 field[5] = makefield("oldbuckets", Ptrto(bucket)) 169 field[6] = makefield("nevacuate", Types[TUINTPTR]) 170 field[7] = makefield("overflow", Types[TUNSAFEPTR]) 171 172 h := typ(TSTRUCT) 173 h.Noalg = true 174 h.Local = t.Local 175 h.Type = field[0] 176 for n := int32(0); n < int32(len(field)-1); n++ { 177 field[n].Down = field[n+1] 178 } 179 field[len(field)-1].Down = nil 180 dowidth(h) 181 t.Hmap = h 182 h.Map = t 183 return h 184 } 185 186 func hiter(t *Type) *Type { 187 if t.Hiter != nil { 188 return t.Hiter 189 } 190 191 // build a struct: 192 // hash_iter { 193 // key *Key 194 // val *Value 195 // t *MapType 196 // h *Hmap 197 // buckets *Bucket 198 // bptr *Bucket 199 // overflow0 unsafe.Pointer 200 // overflow1 unsafe.Pointer 201 // startBucket uintptr 202 // stuff uintptr 203 // bucket uintptr 204 // checkBucket uintptr 205 // } 206 // must match ../../runtime/hashmap.go:hash_iter. 
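// Every field above is pointer-sized (eight pointers or unsafe.Pointers plus
// four uintptrs), so the iterator is exactly 12*Widthptr bytes -- 96 bytes on
// a 64-bit system, 48 on a 32-bit one (illustrative arithmetic; the size is
// enforced by the check after dowidth below).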
207 var field [12]*Type 208 field[0] = makefield("key", Ptrto(t.Down)) 209 210 field[1] = makefield("val", Ptrto(t.Type)) 211 field[2] = makefield("t", Ptrto(Types[TUINT8])) 212 field[3] = makefield("h", Ptrto(hmap(t))) 213 field[4] = makefield("buckets", Ptrto(mapbucket(t))) 214 field[5] = makefield("bptr", Ptrto(mapbucket(t))) 215 field[6] = makefield("overflow0", Types[TUNSAFEPTR]) 216 field[7] = makefield("overflow1", Types[TUNSAFEPTR]) 217 field[8] = makefield("startBucket", Types[TUINTPTR]) 218 field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I 219 field[10] = makefield("bucket", Types[TUINTPTR]) 220 field[11] = makefield("checkBucket", Types[TUINTPTR]) 221 222 // build iterator struct holding the above fields 223 i := typ(TSTRUCT) 224 225 i.Noalg = true 226 i.Type = field[0] 227 for n := int32(0); n < int32(len(field)-1); n++ { 228 field[n].Down = field[n+1] 229 } 230 field[len(field)-1].Down = nil 231 dowidth(i) 232 if i.Width != int64(12*Widthptr) { 233 Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr) 234 } 235 t.Hiter = i 236 i.Map = t 237 return i 238 } 239 240 /* 241 * f is method type, with receiver. 242 * return function type, receiver as first argument (or not). 243 */ 244 func methodfunc(f *Type, receiver *Type) *Type { 245 var in *NodeList 246 if receiver != nil { 247 d := Nod(ODCLFIELD, nil, nil) 248 d.Type = receiver 249 in = list(in, d) 250 } 251 252 var d *Node 253 for t := getinargx(f).Type; t != nil; t = t.Down { 254 d = Nod(ODCLFIELD, nil, nil) 255 d.Type = t.Type 256 d.Isddd = t.Isddd 257 in = list(in, d) 258 } 259 260 var out *NodeList 261 for t := getoutargx(f).Type; t != nil; t = t.Down { 262 d = Nod(ODCLFIELD, nil, nil) 263 d.Type = t.Type 264 out = list(out, d) 265 } 266 267 t := functype(nil, in, out) 268 if f.Nname != nil { 269 // Link to name of original method function. 270 t.Nname = f.Nname 271 } 272 273 return t 274 } 275 276 // methods returns the methods of the non-interface type t, sorted by name. 277 // Generates stub functions as needed. 278 func methods(t *Type) []*Sig { 279 // method type 280 mt := methtype(t, 0) 281 282 if mt == nil { 283 return nil 284 } 285 expandmeth(mt) 286 287 // type stored in interface word 288 it := t 289 290 if !isdirectiface(it) { 291 it = Ptrto(t) 292 } 293 294 // make list of methods for t, 295 // generating code if necessary. 296 var ms []*Sig 297 for f := mt.Xmethod; f != nil; f = f.Down { 298 if f.Etype != TFIELD { 299 Fatalf("methods: not field %v", f) 300 } 301 if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 { 302 Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f) 303 } 304 if getthisx(f.Type).Type == nil { 305 Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f) 306 } 307 if f.Nointerface { 308 continue 309 } 310 311 method := f.Sym 312 if method == nil { 313 continue 314 } 315 316 // get receiver type for this particular method. 317 // if pointer receiver but non-pointer t and 318 // this is not an embedded pointer inside a struct, 319 // method does not apply. 
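// Illustrative example: given
//
//	type T struct{}
//	func (T) M()  {}
//	func (*T) P() {}
//
// methods(T) contains only M, while methods(*T) contains both M and P;
// the pointer-receiver method P is skipped for the value type by the
// checks just below.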
320 this := getthisx(f.Type).Type.Type 321 322 if Isptr[this.Etype] && this.Type == t { 323 continue 324 } 325 if Isptr[this.Etype] && !Isptr[t.Etype] && f.Embedded != 2 && !isifacemethod(f.Type) { 326 continue 327 } 328 329 var sig Sig 330 ms = append(ms, &sig) 331 332 sig.name = method.Name 333 if !exportname(method.Name) { 334 if method.Pkg == nil { 335 Fatalf("methods: missing package") 336 } 337 sig.pkg = method.Pkg 338 } 339 340 sig.isym = methodsym(method, it, 1) 341 sig.tsym = methodsym(method, t, 0) 342 sig.type_ = methodfunc(f.Type, t) 343 sig.mtype = methodfunc(f.Type, nil) 344 345 if sig.isym.Flags&SymSiggen == 0 { 346 sig.isym.Flags |= SymSiggen 347 if !Eqtype(this, it) || this.Width < Types[Tptr].Width { 348 compiling_wrappers = 1 349 genwrapper(it, f, sig.isym, 1) 350 compiling_wrappers = 0 351 } 352 } 353 354 if sig.tsym.Flags&SymSiggen == 0 { 355 sig.tsym.Flags |= SymSiggen 356 if !Eqtype(this, t) { 357 compiling_wrappers = 1 358 genwrapper(t, f, sig.tsym, 0) 359 compiling_wrappers = 0 360 } 361 } 362 } 363 364 sort.Sort(byMethodNameAndPackagePath(ms)) 365 return ms 366 } 367 368 // imethods returns the methods of the interface type t, sorted by name. 369 func imethods(t *Type) []*Sig { 370 var methods []*Sig 371 for f := t.Type; f != nil; f = f.Down { 372 if f.Etype != TFIELD { 373 Fatalf("imethods: not field") 374 } 375 if f.Type.Etype != TFUNC || f.Sym == nil { 376 continue 377 } 378 method := f.Sym 379 var sig = Sig{ 380 name: method.Name, 381 } 382 if !exportname(method.Name) { 383 if method.Pkg == nil { 384 Fatalf("imethods: missing package") 385 } 386 sig.pkg = method.Pkg 387 } 388 389 sig.mtype = f.Type 390 sig.offset = 0 391 sig.type_ = methodfunc(f.Type, nil) 392 393 if n := len(methods); n > 0 { 394 last := methods[n-1] 395 if !(siglt(last, &sig)) { 396 Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name) 397 } 398 } 399 methods = append(methods, &sig) 400 401 // Compiler can only refer to wrappers for non-blank methods. 402 if isblanksym(method) { 403 continue 404 } 405 406 // NOTE(rsc): Perhaps an oversight that 407 // IfaceType.Method is not in the reflect data. 408 // Generate the method body, so that compiled 409 // code can refer to it. 410 isym := methodsym(method, t, 0) 411 412 if isym.Flags&SymSiggen == 0 { 413 isym.Flags |= SymSiggen 414 genwrapper(t, f, isym, 0) 415 } 416 } 417 418 return methods 419 } 420 421 var dimportpath_gopkg *Pkg 422 423 func dimportpath(p *Pkg) { 424 if p.Pathsym != nil { 425 return 426 } 427 428 // If we are compiling the runtime package, there are two runtime packages around 429 // -- localpkg and Runtimepkg. We don't want to produce import path symbols for 430 // both of them, so just produce one for localpkg. 431 if myimportpath == "runtime" && p == Runtimepkg { 432 return 433 } 434 435 if dimportpath_gopkg == nil { 436 dimportpath_gopkg = mkpkg("go") 437 dimportpath_gopkg.Name = "go" 438 } 439 440 nam := "importpath." + p.Prefix + "." 441 442 n := Nod(ONAME, nil, nil) 443 n.Sym = Pkglookup(nam, dimportpath_gopkg) 444 445 n.Class = PEXTERN 446 n.Xoffset = 0 447 p.Pathsym = n.Sym 448 449 if p == localpkg { 450 // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. 
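// (Illustrative: for an imported package such as fmt, the else branch below
// writes the string "fmt", and the resulting symbol should come out as
// go.importpath.fmt. -- the same naming scheme as the go.importpath."".
// symbol described in dgopkgpath below.)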
451 gdatastring(n, myimportpath) 452 } else { 453 gdatastring(n, p.Path) 454 } 455 ggloblsym(n.Sym, int32(Types[TSTRING].Width), obj.DUPOK|obj.RODATA) 456 } 457 458 func dgopkgpath(s *Sym, ot int, pkg *Pkg) int { 459 if pkg == nil { 460 return dgostringptr(s, ot, "") 461 } 462 463 if pkg == localpkg && myimportpath == "" { 464 // If we don't know the full path of the package being compiled (i.e. -p 465 // was not passed on the compiler command line), emit reference to 466 // go.importpath.""., which 6l will rewrite using the correct import path. 467 // Every package that imports this one directly defines the symbol. 468 var ns *Sym 469 470 if ns == nil { 471 ns = Pkglookup("importpath.\"\".", mkpkg("go")) 472 } 473 return dsymptr(s, ot, ns, 0) 474 } 475 476 dimportpath(pkg) 477 return dsymptr(s, ot, pkg.Pathsym, 0) 478 } 479 480 /* 481 * uncommonType 482 * ../../runtime/type.go:/uncommonType 483 */ 484 func dextratype(sym *Sym, off int, t *Type, ptroff int) int { 485 m := methods(t) 486 if t.Sym == nil && len(m) == 0 { 487 return off 488 } 489 490 // fill in *extraType pointer in header 491 off = int(Rnd(int64(off), int64(Widthptr))) 492 493 dsymptr(sym, ptroff, sym, off) 494 495 for _, a := range m { 496 dtypesym(a.type_) 497 } 498 499 ot := off 500 s := sym 501 if t.Sym != nil { 502 ot = dgostringptr(s, ot, t.Sym.Name) 503 if t != Types[t.Etype] && t != errortype { 504 ot = dgopkgpath(s, ot, t.Sym.Pkg) 505 } else { 506 ot = dgostringptr(s, ot, "") 507 } 508 } else { 509 ot = dgostringptr(s, ot, "") 510 ot = dgostringptr(s, ot, "") 511 } 512 513 // slice header 514 ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint) 515 516 n := len(m) 517 ot = duintxx(s, ot, uint64(n), Widthint) 518 ot = duintxx(s, ot, uint64(n), Widthint) 519 520 // methods 521 for _, a := range m { 522 // method 523 // ../../runtime/type.go:/method 524 ot = dgostringptr(s, ot, a.name) 525 526 ot = dgopkgpath(s, ot, a.pkg) 527 ot = dsymptr(s, ot, dtypesym(a.mtype), 0) 528 ot = dsymptr(s, ot, dtypesym(a.type_), 0) 529 if a.isym != nil { 530 ot = dsymptr(s, ot, a.isym, 0) 531 } else { 532 ot = duintptr(s, ot, 0) 533 } 534 if a.tsym != nil { 535 ot = dsymptr(s, ot, a.tsym, 0) 536 } else { 537 ot = duintptr(s, ot, 0) 538 } 539 } 540 541 return ot 542 } 543 544 var kinds = []int{ 545 TINT: obj.KindInt, 546 TUINT: obj.KindUint, 547 TINT8: obj.KindInt8, 548 TUINT8: obj.KindUint8, 549 TINT16: obj.KindInt16, 550 TUINT16: obj.KindUint16, 551 TINT32: obj.KindInt32, 552 TUINT32: obj.KindUint32, 553 TINT64: obj.KindInt64, 554 TUINT64: obj.KindUint64, 555 TUINTPTR: obj.KindUintptr, 556 TFLOAT32: obj.KindFloat32, 557 TFLOAT64: obj.KindFloat64, 558 TBOOL: obj.KindBool, 559 TSTRING: obj.KindString, 560 TPTR32: obj.KindPtr, 561 TPTR64: obj.KindPtr, 562 TSTRUCT: obj.KindStruct, 563 TINTER: obj.KindInterface, 564 TCHAN: obj.KindChan, 565 TMAP: obj.KindMap, 566 TARRAY: obj.KindArray, 567 TFUNC: obj.KindFunc, 568 TCOMPLEX64: obj.KindComplex64, 569 TCOMPLEX128: obj.KindComplex128, 570 TUNSAFEPTR: obj.KindUnsafePointer, 571 } 572 573 func haspointers(t *Type) bool { 574 if t.Haspointers != 0 { 575 return t.Haspointers-1 != 0 576 } 577 578 var ret bool 579 switch t.Etype { 580 case TINT, 581 TUINT, 582 TINT8, 583 TUINT8, 584 TINT16, 585 TUINT16, 586 TINT32, 587 TUINT32, 588 TINT64, 589 TUINT64, 590 TUINTPTR, 591 TFLOAT32, 592 TFLOAT64, 593 TCOMPLEX64, 594 TCOMPLEX128, 595 TBOOL: 596 ret = false 597 598 case TARRAY: 599 if t.Bound < 0 { // slice 600 ret = true 601 break 602 } 603 604 if t.Bound == 0 { // empty array 605 ret = false 606 break 607 } 
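// Illustrative note: a non-empty fixed array has pointers exactly when its
// element type does, e.g. [4]*int does and [4]int does not.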
608 609 ret = haspointers(t.Type) 610 611 case TSTRUCT: 612 ret = false 613 for t1 := t.Type; t1 != nil; t1 = t1.Down { 614 if haspointers(t1.Type) { 615 ret = true 616 break 617 } 618 } 619 620 case TSTRING, 621 TPTR32, 622 TPTR64, 623 TUNSAFEPTR, 624 TINTER, 625 TCHAN, 626 TMAP, 627 TFUNC: 628 fallthrough 629 default: 630 ret = true 631 632 case TFIELD: 633 Fatalf("haspointers: unexpected type, %v", t) 634 } 635 636 t.Haspointers = 1 + uint8(obj.Bool2int(ret)) 637 return ret 638 } 639 640 // typeptrdata returns the length in bytes of the prefix of t 641 // containing pointer data. Anything after this offset is scalar data. 642 func typeptrdata(t *Type) int64 { 643 if !haspointers(t) { 644 return 0 645 } 646 647 switch t.Etype { 648 case TPTR32, 649 TPTR64, 650 TUNSAFEPTR, 651 TFUNC, 652 TCHAN, 653 TMAP: 654 return int64(Widthptr) 655 656 case TSTRING: 657 // struct { byte *str; intgo len; } 658 return int64(Widthptr) 659 660 case TINTER: 661 // struct { Itab *tab; void *data; } or 662 // struct { Type *type; void *data; } 663 return 2 * int64(Widthptr) 664 665 case TARRAY: 666 if Isslice(t) { 667 // struct { byte *array; uintgo len; uintgo cap; } 668 return int64(Widthptr) 669 } 670 // haspointers already eliminated t.Bound == 0. 671 return (t.Bound-1)*t.Type.Width + typeptrdata(t.Type) 672 673 case TSTRUCT: 674 // Find the last field that has pointers. 675 var lastPtrField *Type 676 for t1 := t.Type; t1 != nil; t1 = t1.Down { 677 if haspointers(t1.Type) { 678 lastPtrField = t1 679 } 680 } 681 return lastPtrField.Width + typeptrdata(lastPtrField.Type) 682 683 default: 684 Fatalf("typeptrdata: unexpected type, %v", t) 685 return 0 686 } 687 } 688 689 /* 690 * commonType 691 * ../../runtime/type.go:/commonType 692 */ 693 694 var dcommontype_algarray *Sym 695 696 func dcommontype(s *Sym, ot int, t *Type) int { 697 if ot != 0 { 698 Fatalf("dcommontype %d", ot) 699 } 700 701 sizeofAlg := 2 * Widthptr 702 if dcommontype_algarray == nil { 703 dcommontype_algarray = Pkglookup("algarray", Runtimepkg) 704 } 705 dowidth(t) 706 alg := algtype(t) 707 var algsym *Sym 708 if alg < 0 || alg == AMEM { 709 algsym = dalgsym(t) 710 } 711 712 var sptr *Sym 713 tptr := Ptrto(t) 714 if !Isptr[t.Etype] && (t.Sym != nil || methods(tptr) != nil) { 715 sptr = dtypesym(tptr) 716 } else { 717 sptr = weaktypesym(tptr) 718 } 719 720 gcsym, useGCProg, ptrdata := dgcsym(t) 721 722 // ../../pkg/reflect/type.go:/^type.commonType 723 // actual type structure 724 // type commonType struct { 725 // size uintptr 726 // ptrsize uintptr 727 // hash uint32 728 // _ uint8 729 // align uint8 730 // fieldAlign uint8 731 // kind uint8 732 // alg unsafe.Pointer 733 // gcdata unsafe.Pointer 734 // string *string 735 // *extraType 736 // ptrToThis *Type 737 // } 738 ot = duintptr(s, ot, uint64(t.Width)) 739 ot = duintptr(s, ot, uint64(ptrdata)) 740 741 ot = duint32(s, ot, typehash(t)) 742 ot = duint8(s, ot, 0) // unused 743 744 // runtime (and common sense) expects alignment to be a power of two. 
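// For reference, an illustrative layout of this header (assuming a 64-bit
// target, so Widthptr == Widthint == 8): size at offset 0, ptrdata 8, hash 16,
// unused 20, align 21, fieldAlign 22, kind 23, alg 24, gcdata 32, string 40,
// *extraType 48, ptrToThis 56 -- 64 bytes in all. The align/fieldAlign bytes
// are written just below, after the power-of-two check.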
745 i := int(t.Align) 746 747 if i == 0 { 748 i = 1 749 } 750 if i&(i-1) != 0 { 751 Fatalf("invalid alignment %d for %v", t.Align, t) 752 } 753 ot = duint8(s, ot, t.Align) // align 754 ot = duint8(s, ot, t.Align) // fieldAlign 755 756 i = kinds[t.Etype] 757 if t.Etype == TARRAY && t.Bound < 0 { 758 i = obj.KindSlice 759 } 760 if !haspointers(t) { 761 i |= obj.KindNoPointers 762 } 763 if isdirectiface(t) { 764 i |= obj.KindDirectIface 765 } 766 if useGCProg { 767 i |= obj.KindGCProg 768 } 769 ot = duint8(s, ot, uint8(i)) // kind 770 if algsym == nil { 771 ot = dsymptr(s, ot, dcommontype_algarray, alg*sizeofAlg) 772 } else { 773 ot = dsymptr(s, ot, algsym, 0) 774 } 775 ot = dsymptr(s, ot, gcsym, 0) 776 777 p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned) 778 779 //print("dcommontype: %s\n", p); 780 ot = dgostringptr(s, ot, p) // string 781 782 // skip pointer to extraType, 783 // which follows the rest of this type structure. 784 // caller will fill in if needed. 785 // otherwise linker will assume 0. 786 ot += Widthptr 787 788 ot = dsymptr(s, ot, sptr, 0) // ptrto type 789 return ot 790 } 791 792 func typesym(t *Type) *Sym { 793 return Pkglookup(Tconv(t, obj.FmtLeft), typepkg) 794 } 795 796 func tracksym(t *Type) *Sym { 797 return Pkglookup(Tconv(t.Outer, obj.FmtLeft)+"."+t.Sym.Name, trackpkg) 798 } 799 800 func typelinksym(t *Type) *Sym { 801 // %-uT is what the generated Type's string field says. 802 // It uses (ambiguous) package names instead of import paths. 803 // %-T is the complete, unambiguous type name. 804 // We want the types to end up sorted by string field, 805 // so use that first in the name, and then add :%-T to 806 // disambiguate. We use a tab character as the separator to 807 // ensure the types appear sorted by their string field. The 808 // names are a little long but they are discarded by the linker 809 // and do not end up in the symbol table of the final binary. 810 p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned) + "\t" + Tconv(t, obj.FmtLeft) 811 812 s := Pkglookup(p, typelinkpkg) 813 814 //print("typelinksym: %s -> %+S\n", p, s); 815 816 return s 817 } 818 819 func typesymprefix(prefix string, t *Type) *Sym { 820 p := prefix + "." + Tconv(t, obj.FmtLeft) 821 s := Pkglookup(p, typepkg) 822 823 //print("algsym: %s -> %+S\n", p, s); 824 825 return s 826 } 827 828 func typenamesym(t *Type) *Sym { 829 if t == nil || (Isptr[t.Etype] && t.Type == nil) || isideal(t) { 830 Fatalf("typename %v", t) 831 } 832 s := typesym(t) 833 if s.Def == nil { 834 n := Nod(ONAME, nil, nil) 835 n.Sym = s 836 n.Type = Types[TUINT8] 837 n.Addable = true 838 n.Ullman = 1 839 n.Class = PEXTERN 840 n.Xoffset = 0 841 n.Typecheck = 1 842 s.Def = n 843 844 signatlist = list(signatlist, typenod(t)) 845 } 846 847 return s.Def.Sym 848 } 849 850 func typename(t *Type) *Node { 851 s := typenamesym(t) 852 n := Nod(OADDR, s.Def, nil) 853 n.Type = Ptrto(s.Def.Type) 854 n.Addable = true 855 n.Ullman = 2 856 n.Typecheck = 1 857 return n 858 } 859 860 func weaktypesym(t *Type) *Sym { 861 p := Tconv(t, obj.FmtLeft) 862 s := Pkglookup(p, weaktypepkg) 863 864 //print("weaktypesym: %s -> %+S\n", p, s); 865 866 return s 867 } 868 869 // isreflexive reports whether t has a reflexive equality operator. 870 // That is, if x==x for all x of type t. 
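// For example (illustrative), floating-point and complex keys are not
// reflexive because NaN != NaN, and an interface key may dynamically hold
// such a value; struct and array keys inherit reflexivity from their
// elements, as the cases below show.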
871 func isreflexive(t *Type) bool { 872 switch t.Etype { 873 case TBOOL, 874 TINT, 875 TUINT, 876 TINT8, 877 TUINT8, 878 TINT16, 879 TUINT16, 880 TINT32, 881 TUINT32, 882 TINT64, 883 TUINT64, 884 TUINTPTR, 885 TPTR32, 886 TPTR64, 887 TUNSAFEPTR, 888 TSTRING, 889 TCHAN: 890 return true 891 892 case TFLOAT32, 893 TFLOAT64, 894 TCOMPLEX64, 895 TCOMPLEX128, 896 TINTER: 897 return false 898 899 case TARRAY: 900 if Isslice(t) { 901 Fatalf("slice can't be a map key: %v", t) 902 } 903 return isreflexive(t.Type) 904 905 case TSTRUCT: 906 for t1 := t.Type; t1 != nil; t1 = t1.Down { 907 if !isreflexive(t1.Type) { 908 return false 909 } 910 } 911 return true 912 913 default: 914 Fatalf("bad type for map key: %v", t) 915 return false 916 } 917 } 918 919 // needkeyupdate reports whether map updates with t as a key 920 // need the key to be updated. 921 func needkeyupdate(t *Type) bool { 922 switch t.Etype { 923 case TBOOL, 924 TINT, 925 TUINT, 926 TINT8, 927 TUINT8, 928 TINT16, 929 TUINT16, 930 TINT32, 931 TUINT32, 932 TINT64, 933 TUINT64, 934 TUINTPTR, 935 TPTR32, 936 TPTR64, 937 TUNSAFEPTR, 938 TCHAN: 939 return false 940 941 case TFLOAT32, // floats can be +0/-0 942 TFLOAT64, 943 TCOMPLEX64, 944 TCOMPLEX128, 945 TINTER, 946 TSTRING: // strings might have smaller backing stores 947 return true 948 949 case TARRAY: 950 if Isslice(t) { 951 Fatalf("slice can't be a map key: %v", t) 952 } 953 return needkeyupdate(t.Type) 954 955 case TSTRUCT: 956 for t1 := t.Type; t1 != nil; t1 = t1.Down { 957 if needkeyupdate(t1.Type) { 958 return true 959 } 960 } 961 return false 962 963 default: 964 Fatalf("bad type for map key: %v", t) 965 return true 966 } 967 } 968 969 func dtypesym(t *Type) *Sym { 970 // Replace byte, rune aliases with real type. 971 // They've been separate internally to make error messages 972 // better, but we have to merge them in the reflect tables. 973 if t == bytetype || t == runetype { 974 t = Types[t.Etype] 975 } 976 977 if isideal(t) { 978 Fatalf("dtypesym %v", t) 979 } 980 981 s := typesym(t) 982 if s.Flags&SymSiggen != 0 { 983 return s 984 } 985 s.Flags |= SymSiggen 986 987 // special case (look for runtime below): 988 // when compiling package runtime, 989 // emit the type structures for int, float, etc. 
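// Illustrative note on the dupok logic just below: a named type such as
// bytes.Buffer is emitted only by the package that declares it, while an
// unnamed type such as []int or map[string]int may be emitted by every
// package that mentions it, so its symbol is marked DUPOK and the linker
// keeps a single copy.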
990 tbase := t 991 992 if Isptr[t.Etype] && t.Sym == nil && t.Type.Sym != nil { 993 tbase = t.Type 994 } 995 dupok := 0 996 if tbase.Sym == nil { 997 dupok = obj.DUPOK 998 } 999 1000 if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc 1001 goto ok 1002 } 1003 1004 // named types from other files are defined only by those files 1005 if tbase.Sym != nil && !tbase.Local { 1006 return s 1007 } 1008 if isforw[tbase.Etype] { 1009 return s 1010 } 1011 1012 ok: 1013 ot := 0 1014 xt := 0 1015 switch t.Etype { 1016 default: 1017 ot = dcommontype(s, ot, t) 1018 xt = ot - 2*Widthptr 1019 1020 case TARRAY: 1021 if t.Bound >= 0 { 1022 // ../../runtime/type.go:/ArrayType 1023 s1 := dtypesym(t.Type) 1024 1025 t2 := typ(TARRAY) 1026 t2.Type = t.Type 1027 t2.Bound = -1 // slice 1028 s2 := dtypesym(t2) 1029 ot = dcommontype(s, ot, t) 1030 xt = ot - 2*Widthptr 1031 ot = dsymptr(s, ot, s1, 0) 1032 ot = dsymptr(s, ot, s2, 0) 1033 ot = duintptr(s, ot, uint64(t.Bound)) 1034 } else { 1035 // ../../runtime/type.go:/SliceType 1036 s1 := dtypesym(t.Type) 1037 1038 ot = dcommontype(s, ot, t) 1039 xt = ot - 2*Widthptr 1040 ot = dsymptr(s, ot, s1, 0) 1041 } 1042 1043 // ../../runtime/type.go:/ChanType 1044 case TCHAN: 1045 s1 := dtypesym(t.Type) 1046 1047 ot = dcommontype(s, ot, t) 1048 xt = ot - 2*Widthptr 1049 ot = dsymptr(s, ot, s1, 0) 1050 ot = duintptr(s, ot, uint64(t.Chan)) 1051 1052 case TFUNC: 1053 for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down { 1054 dtypesym(t1.Type) 1055 } 1056 isddd := false 1057 for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down { 1058 isddd = t1.Isddd 1059 dtypesym(t1.Type) 1060 } 1061 1062 for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down { 1063 dtypesym(t1.Type) 1064 } 1065 1066 ot = dcommontype(s, ot, t) 1067 xt = ot - 2*Widthptr 1068 ot = duint8(s, ot, uint8(obj.Bool2int(isddd))) 1069 1070 // two slice headers: in and out. 
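// Illustrative sketch (assuming 64-bit widths): for func(int, string) (bool,
// error) the data following the commonType header is the dotdotdot byte,
// padding up to a pointer boundary, an "in" slice header of length 2, an
// "out" slice header of length 2, and then the four *rtype words (input
// parameters first) that the two headers point into.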
1071 ot = int(Rnd(int64(ot), int64(Widthptr))) 1072 1073 ot = dsymptr(s, ot, s, ot+2*(Widthptr+2*Widthint)) 1074 n := t.Thistuple + t.Intuple 1075 ot = duintxx(s, ot, uint64(n), Widthint) 1076 ot = duintxx(s, ot, uint64(n), Widthint) 1077 ot = dsymptr(s, ot, s, ot+1*(Widthptr+2*Widthint)+n*Widthptr) 1078 ot = duintxx(s, ot, uint64(t.Outtuple), Widthint) 1079 ot = duintxx(s, ot, uint64(t.Outtuple), Widthint) 1080 1081 // slice data 1082 for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down { 1083 ot = dsymptr(s, ot, dtypesym(t1.Type), 0) 1084 n++ 1085 } 1086 for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down { 1087 ot = dsymptr(s, ot, dtypesym(t1.Type), 0) 1088 n++ 1089 } 1090 for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down { 1091 ot = dsymptr(s, ot, dtypesym(t1.Type), 0) 1092 n++ 1093 } 1094 1095 case TINTER: 1096 m := imethods(t) 1097 n := len(m) 1098 for _, a := range m { 1099 dtypesym(a.type_) 1100 } 1101 1102 // ../../../runtime/type.go:/InterfaceType 1103 ot = dcommontype(s, ot, t) 1104 1105 xt = ot - 2*Widthptr 1106 ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint) 1107 ot = duintxx(s, ot, uint64(n), Widthint) 1108 ot = duintxx(s, ot, uint64(n), Widthint) 1109 for _, a := range m { 1110 // ../../../runtime/type.go:/imethod 1111 ot = dgostringptr(s, ot, a.name) 1112 1113 ot = dgopkgpath(s, ot, a.pkg) 1114 ot = dsymptr(s, ot, dtypesym(a.type_), 0) 1115 } 1116 1117 // ../../../runtime/type.go:/MapType 1118 case TMAP: 1119 s1 := dtypesym(t.Down) 1120 1121 s2 := dtypesym(t.Type) 1122 s3 := dtypesym(mapbucket(t)) 1123 s4 := dtypesym(hmap(t)) 1124 ot = dcommontype(s, ot, t) 1125 xt = ot - 2*Widthptr 1126 ot = dsymptr(s, ot, s1, 0) 1127 ot = dsymptr(s, ot, s2, 0) 1128 ot = dsymptr(s, ot, s3, 0) 1129 ot = dsymptr(s, ot, s4, 0) 1130 if t.Down.Width > MAXKEYSIZE { 1131 ot = duint8(s, ot, uint8(Widthptr)) 1132 ot = duint8(s, ot, 1) // indirect 1133 } else { 1134 ot = duint8(s, ot, uint8(t.Down.Width)) 1135 ot = duint8(s, ot, 0) // not indirect 1136 } 1137 1138 if t.Type.Width > MAXVALSIZE { 1139 ot = duint8(s, ot, uint8(Widthptr)) 1140 ot = duint8(s, ot, 1) // indirect 1141 } else { 1142 ot = duint8(s, ot, uint8(t.Type.Width)) 1143 ot = duint8(s, ot, 0) // not indirect 1144 } 1145 1146 ot = duint16(s, ot, uint16(mapbucket(t).Width)) 1147 ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down)))) 1148 ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Down)))) 1149 1150 case TPTR32, TPTR64: 1151 if t.Type.Etype == TANY { 1152 // ../../runtime/type.go:/UnsafePointerType 1153 ot = dcommontype(s, ot, t) 1154 1155 break 1156 } 1157 1158 // ../../runtime/type.go:/PtrType 1159 s1 := dtypesym(t.Type) 1160 1161 ot = dcommontype(s, ot, t) 1162 xt = ot - 2*Widthptr 1163 ot = dsymptr(s, ot, s1, 0) 1164 1165 // ../../runtime/type.go:/StructType 1166 // for security, only the exported fields. 
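// Illustrative example of the per-field records emitted below: for
//
//	type T struct {
//		Name string
//		n    int
//		io.Reader
//	}
//
// three structField entries are written -- "Name" with an empty pkgPath,
// "n" with its package's import path, and the embedded Reader with an empty
// name -- each followed by its field type, note (tag), and offset.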
1167 case TSTRUCT: 1168 n := 0 1169 1170 for t1 := t.Type; t1 != nil; t1 = t1.Down { 1171 dtypesym(t1.Type) 1172 n++ 1173 } 1174 1175 ot = dcommontype(s, ot, t) 1176 xt = ot - 2*Widthptr 1177 ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint) 1178 ot = duintxx(s, ot, uint64(n), Widthint) 1179 ot = duintxx(s, ot, uint64(n), Widthint) 1180 for t1 := t.Type; t1 != nil; t1 = t1.Down { 1181 // ../../runtime/type.go:/structField 1182 if t1.Sym != nil && t1.Embedded == 0 { 1183 ot = dgostringptr(s, ot, t1.Sym.Name) 1184 if exportname(t1.Sym.Name) { 1185 ot = dgostringptr(s, ot, "") 1186 } else { 1187 ot = dgopkgpath(s, ot, t1.Sym.Pkg) 1188 } 1189 } else { 1190 ot = dgostringptr(s, ot, "") 1191 if t1.Type.Sym != nil && t1.Type.Sym.Pkg == builtinpkg { 1192 ot = dgopkgpath(s, ot, localpkg) 1193 } else { 1194 ot = dgostringptr(s, ot, "") 1195 } 1196 } 1197 1198 ot = dsymptr(s, ot, dtypesym(t1.Type), 0) 1199 ot = dgostrlitptr(s, ot, t1.Note) 1200 ot = duintptr(s, ot, uint64(t1.Width)) // field offset 1201 } 1202 } 1203 1204 ot = dextratype(s, ot, t, xt) 1205 ggloblsym(s, int32(ot), int16(dupok|obj.RODATA)) 1206 1207 // generate typelink.foo pointing at s = type.foo. 1208 // The linker will leave a table of all the typelinks for 1209 // types in the binary, so reflect can find them. 1210 // We only need the link for unnamed composites that 1211 // we want be able to find. 1212 if t.Sym == nil { 1213 switch t.Etype { 1214 case TPTR32, TPTR64: 1215 // The ptrto field of the type data cannot be relied on when 1216 // dynamic linking: a type T may be defined in a module that makes 1217 // no use of pointers to that type, but another module can contain 1218 // a package that imports the first one and does use *T pointers. 1219 // The second module will end up defining type data for *T and a 1220 // type.*T symbol pointing at it. It's important that calling 1221 // .PtrTo() on the reflect.Type for T returns this type data and 1222 // not some synthesized object, so we need reflect to be able to 1223 // find it! 1224 if !Ctxt.Flag_dynlink { 1225 break 1226 } 1227 fallthrough 1228 case TARRAY, TCHAN, TFUNC, TMAP: 1229 slink := typelinksym(t) 1230 dsymptr(slink, 0, s, 0) 1231 ggloblsym(slink, int32(Widthptr), int16(dupok|obj.RODATA)) 1232 } 1233 } 1234 1235 return s 1236 } 1237 1238 func dumptypestructs() { 1239 var n *Node 1240 1241 // copy types from externdcl list to signatlist 1242 for _, n := range externdcl { 1243 if n.Op != OTYPE { 1244 continue 1245 } 1246 signatlist = list(signatlist, n) 1247 } 1248 1249 // process signatlist 1250 var t *Type 1251 for l := signatlist; l != nil; l = l.Next { 1252 n = l.N 1253 if n.Op != OTYPE { 1254 continue 1255 } 1256 t = n.Type 1257 dtypesym(t) 1258 if t.Sym != nil { 1259 dtypesym(Ptrto(t)) 1260 } 1261 } 1262 1263 // generate import strings for imported packages 1264 for _, p := range pkgs { 1265 if p.Direct { 1266 dimportpath(p) 1267 } 1268 } 1269 1270 // do basic types if compiling package runtime. 1271 // they have to be in at least one package, 1272 // and runtime is always loaded implicitly, 1273 // so this is as good as any. 1274 // another possible choice would be package main, 1275 // but using runtime means fewer copies in .6 files. 1276 if compiling_runtime != 0 { 1277 for i := 1; i <= TBOOL; i++ { 1278 dtypesym(Ptrto(Types[i])) 1279 } 1280 dtypesym(Ptrto(Types[TSTRING])) 1281 dtypesym(Ptrto(Types[TUNSAFEPTR])) 1282 1283 // emit type structs for error and func(error) string. 1284 // The latter is the type of an auto-generated wrapper. 
1285 dtypesym(Ptrto(errortype)) 1286 1287 dtypesym(functype(nil, list1(Nod(ODCLFIELD, nil, typenod(errortype))), list1(Nod(ODCLFIELD, nil, typenod(Types[TSTRING]))))) 1288 1289 // add paths for runtime and main, which 6l imports implicitly. 1290 dimportpath(Runtimepkg) 1291 1292 if flag_race != 0 { 1293 dimportpath(racepkg) 1294 } 1295 dimportpath(mkpkg("main")) 1296 } 1297 } 1298 1299 func dalgsym(t *Type) *Sym { 1300 var s *Sym 1301 var hashfunc *Sym 1302 var eqfunc *Sym 1303 1304 // dalgsym is only called for a type that needs an algorithm table, 1305 // which implies that the type is comparable (or else it would use ANOEQ). 1306 1307 if algtype(t) == AMEM { 1308 // we use one algorithm table for all AMEM types of a given size 1309 p := fmt.Sprintf(".alg%d", t.Width) 1310 1311 s = Pkglookup(p, typepkg) 1312 1313 if s.Flags&SymAlgGen != 0 { 1314 return s 1315 } 1316 s.Flags |= SymAlgGen 1317 1318 // make hash closure 1319 p = fmt.Sprintf(".hashfunc%d", t.Width) 1320 1321 hashfunc = Pkglookup(p, typepkg) 1322 1323 ot := 0 1324 ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0) 1325 ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure 1326 ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA) 1327 1328 // make equality closure 1329 p = fmt.Sprintf(".eqfunc%d", t.Width) 1330 1331 eqfunc = Pkglookup(p, typepkg) 1332 1333 ot = 0 1334 ot = dsymptr(eqfunc, ot, Pkglookup("memequal_varlen", Runtimepkg), 0) 1335 ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr) 1336 ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA) 1337 } else { 1338 // generate an alg table specific to this type 1339 s = typesymprefix(".alg", t) 1340 1341 hash := typesymprefix(".hash", t) 1342 eq := typesymprefix(".eq", t) 1343 hashfunc = typesymprefix(".hashfunc", t) 1344 eqfunc = typesymprefix(".eqfunc", t) 1345 1346 genhash(hash, t) 1347 geneq(eq, t) 1348 1349 // make Go funcs (closures) for calling hash and equal from Go 1350 dsymptr(hashfunc, 0, hash, 0) 1351 1352 ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA) 1353 dsymptr(eqfunc, 0, eq, 0) 1354 ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA) 1355 } 1356 1357 // ../../runtime/alg.go:/typeAlg 1358 ot := 0 1359 1360 ot = dsymptr(s, ot, hashfunc, 0) 1361 ot = dsymptr(s, ot, eqfunc, 0) 1362 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 1363 return s 1364 } 1365 1366 // maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap, 1367 // which holds 1-bit entries describing where pointers are in a given type. 1368 // 16 bytes is enough to describe 128 pointer-sized words, 512 or 1024 bytes 1369 // depending on the system. Above this length, the GC information is 1370 // recorded as a GC program, which can express repetition compactly. 1371 // In either form, the information is used by the runtime to initialize the 1372 // heap bitmap, and for large types (like 128 or more words), they are 1373 // roughly the same speed. GC programs are never much larger and often 1374 // more compact. (If large arrays are involved, they can be arbitrarily more 1375 // compact.) 1376 // 1377 // The cutoff must be large enough that any allocation large enough to 1378 // use a GC program is large enough that it does not share heap bitmap 1379 // bytes with any other objects, allowing the GC program execution to 1380 // assume an aligned start and not use atomic operations. In the current 1381 // runtime, this means all malloc size classes larger than the cutoff must 1382 // be multiples of four words. 
On 32-bit systems that's 16 bytes, and 1383 // all size classes >= 16 bytes are 16-byte aligned, so no real constraint. 1384 // On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed 1385 // for size classes >= 256 bytes. On a 64-bit sytem, 256 bytes allocated 1386 // is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes 1387 // must be >= 4. 1388 // 1389 // We used to use 16 because the GC programs do have some constant overhead 1390 // to get started, and processing 128 pointers seems to be enough to 1391 // amortize that overhead well. 1392 // 1393 // To make sure that the runtime's chansend can call typeBitsBulkBarrier, 1394 // we raised the limit to 2048, so that even 32-bit systems are guaranteed to 1395 // use bitmaps for objects up to 64 kB in size. 1396 // 1397 // Also known to reflect/type.go. 1398 // 1399 const maxPtrmaskBytes = 2048 1400 1401 // dgcsym emits and returns a data symbol containing GC information for type t, 1402 // along with a boolean reporting whether the UseGCProg bit should be set in 1403 // the type kind, and the ptrdata field to record in the reflect type information. 1404 func dgcsym(t *Type) (sym *Sym, useGCProg bool, ptrdata int64) { 1405 ptrdata = typeptrdata(t) 1406 if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 { 1407 sym = dgcptrmask(t) 1408 return 1409 } 1410 1411 useGCProg = true 1412 sym, ptrdata = dgcprog(t) 1413 return 1414 } 1415 1416 // dgcptrmask emits and returns the symbol containing a pointer mask for type t. 1417 func dgcptrmask(t *Type) *Sym { 1418 ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8) 1419 fillptrmask(t, ptrmask) 1420 p := fmt.Sprintf("gcbits.%x", ptrmask) 1421 1422 sym := Pkglookup(p, Runtimepkg) 1423 if sym.Flags&SymUniq == 0 { 1424 sym.Flags |= SymUniq 1425 for i, x := range ptrmask { 1426 duint8(sym, i, x) 1427 } 1428 ggloblsym(sym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL) 1429 } 1430 return sym 1431 } 1432 1433 // fillptrmask fills in ptrmask with 1s corresponding to the 1434 // word offsets in t that hold pointers. 1435 // ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits. 1436 func fillptrmask(t *Type, ptrmask []byte) { 1437 for i := range ptrmask { 1438 ptrmask[i] = 0 1439 } 1440 if !haspointers(t) { 1441 return 1442 } 1443 1444 vec := bvalloc(8 * int32(len(ptrmask))) 1445 xoffset := int64(0) 1446 onebitwalktype1(t, &xoffset, vec) 1447 1448 nptr := typeptrdata(t) / int64(Widthptr) 1449 for i := int64(0); i < nptr; i++ { 1450 if bvget(vec, int32(i)) == 1 { 1451 ptrmask[i/8] |= 1 << (uint(i) % 8) 1452 } 1453 } 1454 } 1455 1456 // dgcprog emits and returns the symbol containing a GC program for type t 1457 // along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]). 1458 // In practice, the size is typeptrdata(t) except for non-trivial arrays. 1459 // For non-trivial arrays, the program describes the full t.Width size. 
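// Illustrative arithmetic for the choice made in dgcsym above: with
// maxPtrmaskBytes == 2048, a pointer mask covers up to 2048*8 = 16384
// pointer-sized words, so any type whose pointer data spans at most 128 kB
// (64-bit) or 64 kB (32-bit) gets a mask, and only larger types get a GC
// program from dgcprog below.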
1460 func dgcprog(t *Type) (*Sym, int64) { 1461 dowidth(t) 1462 if t.Width == BADWIDTH { 1463 Fatalf("dgcprog: %v badwidth", t) 1464 } 1465 sym := typesymprefix(".gcprog", t) 1466 var p GCProg 1467 p.init(sym) 1468 p.emit(t, 0) 1469 offset := p.w.BitIndex() * int64(Widthptr) 1470 p.end() 1471 if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width { 1472 Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width) 1473 } 1474 return sym, offset 1475 } 1476 1477 type GCProg struct { 1478 sym *Sym 1479 symoff int 1480 w gcprog.Writer 1481 } 1482 1483 var Debug_gcprog int // set by -d gcprog 1484 1485 func (p *GCProg) init(sym *Sym) { 1486 p.sym = sym 1487 p.symoff = 4 // first 4 bytes hold program length 1488 p.w.Init(p.writeByte) 1489 if Debug_gcprog > 0 { 1490 fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", sym) 1491 p.w.Debug(os.Stderr) 1492 } 1493 } 1494 1495 func (p *GCProg) writeByte(x byte) { 1496 p.symoff = duint8(p.sym, p.symoff, x) 1497 } 1498 1499 func (p *GCProg) end() { 1500 p.w.End() 1501 duint32(p.sym, 0, uint32(p.symoff-4)) 1502 ggloblsym(p.sym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) 1503 if Debug_gcprog > 0 { 1504 fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.sym) 1505 } 1506 } 1507 1508 func (p *GCProg) emit(t *Type, offset int64) { 1509 dowidth(t) 1510 if !haspointers(t) { 1511 return 1512 } 1513 if t.Width == int64(Widthptr) { 1514 p.w.Ptr(offset / int64(Widthptr)) 1515 return 1516 } 1517 switch t.Etype { 1518 default: 1519 Fatalf("GCProg.emit: unexpected type %v", t) 1520 1521 case TSTRING: 1522 p.w.Ptr(offset / int64(Widthptr)) 1523 1524 case TINTER: 1525 p.w.Ptr(offset / int64(Widthptr)) 1526 p.w.Ptr(offset/int64(Widthptr) + 1) 1527 1528 case TARRAY: 1529 if Isslice(t) { 1530 p.w.Ptr(offset / int64(Widthptr)) 1531 return 1532 } 1533 if t.Bound == 0 { 1534 // should have been handled by haspointers check above 1535 Fatalf("GCProg.emit: empty array") 1536 } 1537 1538 // Flatten array-of-array-of-array to just a big array by multiplying counts. 1539 count := t.Bound 1540 elem := t.Type 1541 for Isfixedarray(elem) { 1542 count *= elem.Bound 1543 elem = elem.Type 1544 } 1545 1546 if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) { 1547 // Cheaper to just emit the bits. 1548 for i := int64(0); i < count; i++ { 1549 p.emit(elem, offset+i*elem.Width) 1550 } 1551 return 1552 } 1553 p.emit(elem, offset) 1554 p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr)) 1555 p.w.Repeat(elem.Width/int64(Widthptr), count-1) 1556 1557 case TSTRUCT: 1558 for t1 := t.Type; t1 != nil; t1 = t1.Down { 1559 p.emit(t1.Type, offset+t1.Width) 1560 } 1561 } 1562 }
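// Illustrative note on the TARRAY case in (*GCProg).emit above: nested fixed
// arrays are flattened first, so for a [1024][2]*byte the walk sees count =
// 2048 and elem = *byte; when ShouldRepeat reports that a repeat instruction
// is cheaper, the pointer bit is emitted once followed by Repeat(1, 2047)
// rather than 2048 individual entries.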