// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/internal/obj"
	"fmt"
)

/*
 * runtime interface and reflection data structures
 */

// signatlist accumulates types whose runtime type descriptors must be
// emitted; it is appended to by typenamesym and drained by dumptypestructs.
var signatlist *NodeList

// sigcmp orders two method signatures: first by method name, then by
// package path. A nil package (exported method) sorts before any
// named package.
func sigcmp(a *Sig, b *Sig) int {
	i := stringsCompare(a.name, b.name)
	if i != 0 {
		return i
	}
	if a.pkg == b.pkg {
		return 0
	}
	if a.pkg == nil {
		return -1
	}
	if b.pkg == nil {
		return +1
	}
	return stringsCompare(a.pkg.Path, b.pkg.Path)
}

// lsort merge-sorts the singly linked Sig list l using the comparison
// function f and returns the new head. Sorting is not stable in any
// documented sense; callers only rely on the f-order of the result.
func lsort(l *Sig, f func(*Sig, *Sig) int) *Sig {
	if l == nil || l.link == nil {
		return l
	}

	// Find the midpoint: l2 advances two links per one link of l1,
	// so when l2 runs off the end, l1 is just before the middle.
	l1 := l
	l2 := l
	for {
		l2 = l2.link
		if l2 == nil {
			break
		}
		l2 = l2.link
		if l2 == nil {
			break
		}
		l1 = l1.link
	}

	// Split into two halves and sort each recursively.
	l2 = l1.link
	l1.link = nil
	l1 = lsort(l, f)
	l2 = lsort(l2, f)

	/* set up lead element */
	if f(l1, l2) < 0 {
		l = l1
		l1 = l1.link
	} else {
		l = l2
		l2 = l2.link
	}

	le := l

	// Merge the two sorted halves onto the tail le.
	for {
		if l1 == nil {
			for l2 != nil {
				le.link = l2
				le = l2
				l2 = l2.link
			}

			le.link = nil
			break
		}

		if l2 == nil {
			for l1 != nil {
				le.link = l1
				le = l1
				l1 = l1.link
			}

			break
		}

		if f(l1, l2) < 0 {
			le.link = l1
			le = l1
			l1 = l1.link
		} else {
			le.link = l2
			le = l2
			l2 = l2.link
		}
	}

	le.link = nil
	return l
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../runtime/hashmap.go!
111 const ( 112 BUCKETSIZE = 8 113 MAXKEYSIZE = 128 114 MAXVALSIZE = 128 115 ) 116 117 func makefield(name string, t *Type) *Type { 118 f := typ(TFIELD) 119 f.Type = t 120 f.Sym = new(Sym) 121 f.Sym.Name = name 122 return f 123 } 124 125 func mapbucket(t *Type) *Type { 126 if t.Bucket != nil { 127 return t.Bucket 128 } 129 130 bucket := typ(TSTRUCT) 131 keytype := t.Down 132 valtype := t.Type 133 dowidth(keytype) 134 dowidth(valtype) 135 if keytype.Width > MAXKEYSIZE { 136 keytype = Ptrto(keytype) 137 } 138 if valtype.Width > MAXVALSIZE { 139 valtype = Ptrto(valtype) 140 } 141 142 // The first field is: uint8 topbits[BUCKETSIZE]. 143 arr := typ(TARRAY) 144 145 arr.Type = Types[TUINT8] 146 arr.Bound = BUCKETSIZE 147 var field [4]*Type 148 field[0] = makefield("topbits", arr) 149 arr = typ(TARRAY) 150 arr.Type = keytype 151 arr.Bound = BUCKETSIZE 152 field[1] = makefield("keys", arr) 153 arr = typ(TARRAY) 154 arr.Type = valtype 155 arr.Bound = BUCKETSIZE 156 field[2] = makefield("values", arr) 157 field[3] = makefield("overflow", Ptrto(bucket)) 158 159 // link up fields 160 bucket.Noalg = 1 161 162 bucket.Local = t.Local 163 bucket.Type = field[0] 164 for n := int32(0); n < int32(len(field)-1); n++ { 165 field[n].Down = field[n+1] 166 } 167 field[len(field)-1].Down = nil 168 dowidth(bucket) 169 170 // Pad to the native integer alignment. 171 // This is usually the same as widthptr; the exception (as usual) is amd64p32. 172 if Widthreg > Widthptr { 173 bucket.Width += int64(Widthreg) - int64(Widthptr) 174 } 175 176 // See comment on hmap.overflow in ../../runtime/hashmap.go. 177 if !haspointers(t.Type) && !haspointers(t.Down) && t.Type.Width <= MAXKEYSIZE && t.Down.Width <= MAXVALSIZE { 178 bucket.Haspointers = 1 // no pointers 179 } 180 181 t.Bucket = bucket 182 183 bucket.Map = t 184 return bucket 185 } 186 187 // Builds a type representing a Hmap structure for the given map type. 188 // Make sure this stays in sync with ../../runtime/hashmap.go! 
// hmap builds (and caches on t.Hmap) the compiler's mirror of the
// runtime Hmap header struct for map type t.
// Must stay in sync with ../../runtime/hashmap.go!
func hmap(t *Type) *Type {
	if t.Hmap != nil {
		return t.Hmap
	}

	bucket := mapbucket(t)
	var field [8]*Type
	field[0] = makefield("count", Types[TINT])
	field[1] = makefield("flags", Types[TUINT8])
	field[2] = makefield("B", Types[TUINT8])
	field[3] = makefield("hash0", Types[TUINT32])
	field[4] = makefield("buckets", Ptrto(bucket))
	field[5] = makefield("oldbuckets", Ptrto(bucket))
	field[6] = makefield("nevacuate", Types[TUINTPTR])
	field[7] = makefield("overflow", Types[TUNSAFEPTR])

	h := typ(TSTRUCT)
	h.Noalg = 1
	h.Local = t.Local
	h.Type = field[0]
	// Chain the fields into the struct's field list.
	for n := int32(0); n < int32(len(field)-1); n++ {
		field[n].Down = field[n+1]
	}
	field[len(field)-1].Down = nil
	dowidth(h)
	t.Hmap = h
	h.Map = t
	return h
}

// hiter builds (and caches on t.Hiter) the map-iterator struct
// corresponding to the runtime's hash_iter for map type t.
func hiter(t *Type) *Type {
	if t.Hiter != nil {
		return t.Hiter
	}

	// build a struct:
	// hash_iter {
	//    key *Key
	//    val *Value
	//    t *MapType
	//    h *Hmap
	//    buckets *Bucket
	//    bptr *Bucket
	//    overflow0 unsafe.Pointer
	//    overflow1 unsafe.Pointer
	//    startBucket uintptr
	//    stuff uintptr
	//    bucket uintptr
	//    checkBucket uintptr
	// }
	// must match ../../runtime/hashmap.go:hash_iter.
	var field [12]*Type
	field[0] = makefield("key", Ptrto(t.Down))

	field[1] = makefield("val", Ptrto(t.Type))
	field[2] = makefield("t", Ptrto(Types[TUINT8]))
	field[3] = makefield("h", Ptrto(hmap(t)))
	field[4] = makefield("buckets", Ptrto(mapbucket(t)))
	field[5] = makefield("bptr", Ptrto(mapbucket(t)))
	field[6] = makefield("overflow0", Types[TUNSAFEPTR])
	field[7] = makefield("overflow1", Types[TUNSAFEPTR])
	field[8] = makefield("startBucket", Types[TUINTPTR])
	field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
	field[10] = makefield("bucket", Types[TUINTPTR])
	field[11] = makefield("checkBucket", Types[TUINTPTR])

	// build iterator struct holding the above fields
	i := typ(TSTRUCT)

	i.Noalg = 1
	i.Type = field[0]
	for n := int32(0); n < int32(len(field)-1); n++ {
		field[n].Down = field[n+1]
	}
	field[len(field)-1].Down = nil
	dowidth(i)
	// Sanity check: the runtime's hash_iter is exactly 12 words.
	if i.Width != int64(12*Widthptr) {
		Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
	}
	t.Hiter = i
	i.Map = t
	return i
}

/*
 * f is method type, with receiver.
 * return function type, receiver as first argument (or not).
 */
func methodfunc(f *Type, receiver *Type) *Type {
	// Optionally prepend the receiver as an ordinary first parameter.
	var in *NodeList
	if receiver != nil {
		d := Nod(ODCLFIELD, nil, nil)
		d.Type = receiver
		in = list(in, d)
	}

	var d *Node
	for t := getinargx(f).Type; t != nil; t = t.Down {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		d.Isddd = t.Isddd
		in = list(in, d)
	}

	var out *NodeList
	for t := getoutargx(f).Type; t != nil; t = t.Down {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		out = list(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname != nil {
		// Link to name of original method function.
		t.Nname = f.Nname
	}

	return t
}
/*
 * return methods of non-interface type t, sorted by name.
 * generates stub functions as needed.
 */
func methods(t *Type) *Sig {
	// method type
	mt := methtype(t, 0)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = Ptrto(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var a *Sig

	var this *Type
	var b *Sig
	var method *Sym
	for f := mt.Xmethod; f != nil; f = f.Down {
		if f.Etype != TFIELD {
			Fatal("methods: not field %v", Tconv(f, 0))
		}
		if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
			Fatal("non-method on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
		}
		if getthisx(f.Type).Type == nil {
			Fatal("receiver with no type on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
		}
		if f.Nointerface {
			continue
		}

		method = f.Sym
		if method == nil {
			continue
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		this = getthisx(f.Type).Type.Type

		if Isptr[this.Etype] && this.Type == t {
			continue
		}
		if Isptr[this.Etype] && !Isptr[t.Etype] && f.Embedded != 2 && !isifacemethod(f.Type) {
			continue
		}

		// Prepend a new Sig entry for this method.
		b = new(Sig)
		b.link = a
		a = b

		a.name = method.Name
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatal("methods: missing package")
			}
			a.pkg = method.Pkg
		}

		a.isym = methodsym(method, it, 1)
		a.tsym = methodsym(method, t, 0)
		a.type_ = methodfunc(f.Type, t)
		a.mtype = methodfunc(f.Type, nil)

		// Generate the interface-call wrapper once per symbol.
		if a.isym.Flags&SymSiggen == 0 {
			a.isym.Flags |= SymSiggen
			if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
				compiling_wrappers = 1
				genwrapper(it, f, a.isym, 1)
				compiling_wrappers = 0
			}
		}

		// Generate the value-call wrapper once per symbol.
		if a.tsym.Flags&SymSiggen == 0 {
			a.tsym.Flags |= SymSiggen
			if !Eqtype(this, t) {
				compiling_wrappers = 1
				genwrapper(t, f, a.tsym, 0)
				compiling_wrappers = 0
			}
		}
	}

	return lsort(a, sigcmp)
}

/*
 * return methods of interface type t, sorted by name.
 */
func imethods(t *Type) *Sig {
	var a *Sig
	var method *Sym
	var isym *Sym

	var all *Sig
	var last *Sig
	for f := t.Type; f != nil; f = f.Down {
		if f.Etype != TFIELD {
			Fatal("imethods: not field")
		}
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		method = f.Sym
		a = new(Sig)
		a.name = method.Name
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatal("imethods: missing package")
			}
			a.pkg = method.Pkg
		}

		a.mtype = f.Type
		a.offset = 0
		a.type_ = methodfunc(f.Type, nil)

		// Interface methods arrive already sorted; verify that.
		if last != nil && sigcmp(last, a) >= 0 {
			Fatal("sigcmp vs sortinter %s %s", last.name, a.name)
		}
		if last == nil {
			all = a
		} else {
			last.link = a
		}
		last = a

		// Compiler can only refer to wrappers for non-blank methods.
		if isblanksym(method) {
			continue
		}

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym = methodsym(method, t, 0)

		if isym.Flags&SymSiggen == 0 {
			isym.Flags |= SymSiggen
			genwrapper(t, f, isym, 0)
		}
	}

	return all
}

// dimportpath_gopkg is the lazily created "go" pseudo-package that
// owns the emitted importpath symbols.
var dimportpath_gopkg *Pkg

// dimportpath emits the import-path string symbol for package p,
// once per package (cached via p.Pathsym).
func dimportpath(p *Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	if dimportpath_gopkg == nil {
		dimportpath_gopkg = mkpkg("go")
		dimportpath_gopkg.Name = "go"
	}

	var nam string
	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		nam = "importpath." + pathtoprefix(myimportpath) + "."
	} else {
		nam = "importpath." + p.Prefix + "."
	}

	n := Nod(ONAME, nil, nil)
	n.Sym = Pkglookup(nam, dimportpath_gopkg)

	n.Class = PEXTERN
	n.Xoffset = 0
	p.Pathsym = n.Sym

	gdatastring(n, p.Path)
	ggloblsym(n.Sym, int32(Types[TSTRING].Width), obj.DUPOK|obj.RODATA)
}

// dgopkgpath writes a pointer to pkg's import-path string at s+ot and
// returns the new offset.
func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
	if pkg == nil {
		return dgostringptr(s, ot, "")
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full path of the package being compiled (i.e. -p
		// was not passed on the compiler command line), emit reference to
		// go.importpath.""., which 6l will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// NOTE(review): ns is always nil here, so the guard below always
		// fires — apparently a leftover from a C static variable. Harmless.
		var ns *Sym

		if ns == nil {
			ns = Pkglookup("importpath.\"\".", mkpkg("go"))
		}
		return dsymptr(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptr(s, ot, pkg.Pathsym, 0)
}

/*
 * uncommonType
 * ../../runtime/type.go:/uncommonType
 */
func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
	m := methods(t)
	if t.Sym == nil && m == nil {
		return off
	}

	// fill in *extraType pointer in header
	off = int(Rnd(int64(off), int64(Widthptr)))

	dsymptr(sym, ptroff, sym, off)

	// Emit the method types first and count the methods.
	n := 0
	for a := m; a != nil; a = a.link {
		dtypesym(a.type_)
		n++
	}

	ot := off
	s := sym
	if t.Sym != nil {
		ot = dgostringptr(s, ot, t.Sym.Name)
		if t != Types[t.Etype] && t != errortype {
			ot = dgopkgpath(s, ot, t.Sym.Pkg)
		} else {
			ot = dgostringptr(s, ot, "")
		}
	} else {
		ot = dgostringptr(s, ot, "")
		ot = dgostringptr(s, ot, "")
	}

	// slice header
	ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)

	ot = duintxx(s, ot, uint64(n), Widthint)
	ot = duintxx(s, ot, uint64(n), Widthint)

	// methods
	for a := m; a != nil; a = a.link {
		// method
		// ../../runtime/type.go:/method
		ot = dgostringptr(s, ot, a.name)

		ot = dgopkgpath(s, ot, a.pkg)
		ot = dsymptr(s, ot, dtypesym(a.mtype), 0)
		ot = dsymptr(s, ot, dtypesym(a.type_), 0)
		if a.isym != nil {
			ot = dsymptr(s, ot, a.isym, 0)
		} else {
			ot = duintptr(s, ot, 0)
		}
		if a.tsym != nil {
			ot = dsymptr(s, ot, a.tsym, 0)
		} else {
			ot = duintptr(s, ot, 0)
		}
	}

	return ot
}

// kinds maps compiler etype codes to reflect kind constants.
var kinds = []int{
	TINT:        obj.KindInt,
	TUINT:       obj.KindUint,
	TINT8:       obj.KindInt8,
	TUINT8:      obj.KindUint8,
	TINT16:      obj.KindInt16,
	TUINT16:     obj.KindUint16,
	TINT32:      obj.KindInt32,
	TUINT32:     obj.KindUint32,
	TINT64:      obj.KindInt64,
	TUINT64:     obj.KindUint64,
	TUINTPTR:    obj.KindUintptr,
	TFLOAT32:    obj.KindFloat32,
	TFLOAT64:    obj.KindFloat64,
	TBOOL:       obj.KindBool,
	TSTRING:     obj.KindString,
	TPTR32:      obj.KindPtr,
	TPTR64:      obj.KindPtr,
	TSTRUCT:     obj.KindStruct,
	TINTER:      obj.KindInterface,
	TCHAN:       obj.KindChan,
	TMAP:        obj.KindMap,
	TARRAY:      obj.KindArray,
	TFUNC:       obj.KindFunc,
	TCOMPLEX64:  obj.KindComplex64,
	TCOMPLEX128: obj.KindComplex128,
	TUNSAFEPTR:  obj.KindUnsafePointer,
}

// haspointers reports whether values of type t contain pointers.
// The answer is cached in t.Haspointers: 0 = not computed,
// 1 = no pointers, 2 = has pointers.
func haspointers(t *Type) bool {
	if t.Haspointers != 0 {
		return t.Haspointers-1 != 0
	}

	var ret bool
	switch t.Etype {
	case TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TBOOL:
		ret = false

	case TARRAY:
		if t.Bound < 0 { // slice
			ret = true
			break
		}

		if t.Bound == 0 { // empty array
			ret = false
			break
		}

		ret = haspointers(t.Type)

	case TSTRUCT:
		ret = false
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if haspointers(t1.Type) {
				ret = true
				break
			}
		}

	case TSTRING,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TINTER,
		TCHAN,
		TMAP,
		TFUNC:
		fallthrough
	default:
		ret = true
	}

	t.Haspointers = 1 + uint8(bool2int(ret))
	return ret
}

/*
 * commonType
 * ../../runtime/type.go:/commonType
 */

// dcommontype_algarray caches the runtime.algarray symbol.
var dcommontype_algarray *Sym

// dcommontype writes the commonType header for t into symbol s,
// starting at offset ot (which must be 0), and returns the offset
// just past the header. The caller fills in the *extraType slot later
// (see dextratype).
func dcommontype(s *Sym, ot int, t *Type) int {
	if ot != 0 {
		Fatal("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if dcommontype_algarray == nil {
		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *Sym
	if alg < 0 || alg == AMEM {
		algsym = dalgsym(t)
	}

	// ptrToThis: a strong reference for named non-pointer types,
	// a weak one otherwise.
	var sptr *Sym
	if t.Sym != nil && !Isptr[t.Etype] {
		sptr = dtypesym(Ptrto(t))
	} else {
		sptr = weaktypesym(Ptrto(t))
	}

	// All (non-reflect-allocated) Types share the same zero object.
	// Each place in the compiler where a pointer to the zero object
	// might be returned by a runtime call (map access return value,
	// 2-arg type cast) declares the size of the zerovalue it needs.
	// The linker magically takes the max of all the sizes.
	zero := Pkglookup("zerovalue", Runtimepkg)

	// We use size 0 here so we get the pointer to the zero value,
	// but don't allocate space for the zero value unless we need it.
	// TODO: how do we get this symbol into bss? We really want
	// a read-only bss, but I don't think such a thing exists.

	// ../../pkg/reflect/type.go:/^type.commonType
	// actual type structure
	// type commonType struct {
	//	size          uintptr
	//	hash          uint32
	//	_             uint8
	//	align         uint8
	//	fieldAlign    uint8
	//	kind          uint8
	//	alg           unsafe.Pointer
	//	gc            unsafe.Pointer
	//	string        *string
	//	*extraType
	//	ptrToThis     *Type
	//	zero          unsafe.Pointer
	// }
	ot = duintptr(s, ot, uint64(t.Width))

	ot = duint32(s, ot, typehash(t))
	ot = duint8(s, ot, 0) // unused

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatal("invalid alignment %d for %v", t.Align, Tconv(t, 0))
	}
	ot = duint8(s, ot, t.Align) // align
	ot = duint8(s, ot, t.Align) // fieldAlign

	gcprog := usegcprog(t)

	i = kinds[t.Etype]
	if t.Etype == TARRAY && t.Bound < 0 {
		i = obj.KindSlice
	}
	if !haspointers(t) {
		i |= obj.KindNoPointers
	}
	if isdirectiface(t) {
		i |= obj.KindDirectIface
	}
	if gcprog {
		i |= obj.KindGCProg
	}
	ot = duint8(s, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(s, ot, dcommontype_algarray, alg*sizeofAlg)
	} else {
		ot = dsymptr(s, ot, algsym, 0)
	}

	// gc
	if gcprog {
		var gcprog1 *Sym
		var gcprog0 *Sym
		gengcprog(t, &gcprog0, &gcprog1)
		if gcprog0 != nil {
			ot = dsymptr(s, ot, gcprog0, 0)
		} else {
			ot = duintptr(s, ot, 0)
		}
		ot = dsymptr(s, ot, gcprog1, 0)
	} else {
		// Emit (and deduplicate by name) the unrolled GC mask.
		var gcmask [16]uint8
		gengcmask(t, gcmask[:])
		x1 := uint64(0)
		for i := 0; i < 8; i++ {
			x1 = x1<<8 | uint64(gcmask[i])
		}
		var p string
		if Widthptr == 4 {
			p = fmt.Sprintf("gcbits.0x%016x", x1)
		} else {
			x2 := uint64(0)
			for i := 0; i < 8; i++ {
				x2 = x2<<8 | uint64(gcmask[i+8])
			}
			p = fmt.Sprintf("gcbits.0x%016x%016x", x1, x2)
		}

		sbits := Pkglookup(p, Runtimepkg)
		if sbits.Flags&SymUniq == 0 {
			sbits.Flags |= SymUniq
			for i := 0; i < 2*Widthptr; i++ {
				duint8(sbits, i, gcmask[i])
			}
			ggloblsym(sbits, 2*int32(Widthptr), obj.DUPOK|obj.RODATA)
		}

		ot = dsymptr(s, ot, sbits, 0)
		ot = duintptr(s, ot, 0)
	}

	p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned)

	//print("dcommontype: %s\n", p);
	ot = dgostringptr(s, ot, p) // string

	// skip pointer to extraType,
	// which follows the rest of this type structure.
	// caller will fill in if needed.
	// otherwise linker will assume 0.
	ot += Widthptr

	ot = dsymptr(s, ot, sptr, 0) // ptrto type
	ot = dsymptr(s, ot, zero, 0) // ptr to zero value
	return ot
}

// typesym returns the symbol naming the type descriptor for t.
func typesym(t *Type) *Sym {
	return Pkglookup(Tconv(t, obj.FmtLeft), typepkg)
}

// tracksym returns the field-tracking symbol for field t.
func tracksym(t *Type) *Sym {
	return Pkglookup(Tconv(t.Outer, obj.FmtLeft)+"."+t.Sym.Name, trackpkg)
}

func typelinksym(t *Type) *Sym {
	// %-uT is what the generated Type's string field says.
	// It uses (ambiguous) package names instead of import paths.
	// %-T is the complete, unambiguous type name.
	// We want the types to end up sorted by string field,
	// so use that first in the name, and then add :%-T to
	// disambiguate. We use a tab character as the separator to
	// ensure the types appear sorted by their string field. The
	// names are a little long but they are discarded by the linker
	// and do not end up in the symbol table of the final binary.
	p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned) + "\t" + Tconv(t, obj.FmtLeft)

	s := Pkglookup(p, typelinkpkg)

	//print("typelinksym: %s -> %+S\n", p, s);

	return s
}

// typesymprefix returns the symbol "prefix.<type name>" in typepkg,
// used for per-type helper symbols such as .hash and .eq.
func typesymprefix(prefix string, t *Type) *Sym {
	p := prefix + "." + Tconv(t, obj.FmtLeft)
	s := Pkglookup(p, typepkg)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym returns the symbol for the type-descriptor name node
// of t, queuing t on signatlist the first time it is seen.
func typenamesym(t *Type) *Sym {
	if t == nil || (Isptr[t.Etype] && t.Type == nil) || isideal(t) {
		Fatal("typename %v", Tconv(t, 0))
	}
	s := typesym(t)
	if s.Def == nil {
		n := Nod(ONAME, nil, nil)
		n.Sym = s
		n.Type = Types[TUINT8]
		n.Addable = true
		n.Ullman = 1
		n.Class = PEXTERN
		n.Xoffset = 0
		n.Typecheck = 1
		s.Def = n

		signatlist = list(signatlist, typenod(t))
	}

	return s.Def.Sym
}

// typename returns a node holding the address of t's type descriptor.
func typename(t *Type) *Node {
	s := typenamesym(t)
	n := Nod(OADDR, s.Def, nil)
	n.Type = Ptrto(s.Def.Type)
	n.Addable = true
	n.Ullman = 2
	n.Typecheck = 1
	return n
}

// weaktypesym returns the weak reference symbol for t's descriptor.
func weaktypesym(t *Type) *Sym {
	p := Tconv(t, obj.FmtLeft)
	s := Pkglookup(p, weaktypepkg)

	//print("weaktypesym: %s -> %+S\n", p, s);

	return s
}

/*
 * isreflexive reports whether t has a reflexive equality operator.
 * That is, if x==x for all x of type t.
 */
func isreflexive(t *Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TSTRING,
		TCHAN:
		return true

	// Floating point (NaN != NaN) and anything containing it
	// is not reflexive.
	case TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER:
		return false

	case TARRAY:
		if Isslice(t) {
			Fatal("slice can't be a map key: %v", Tconv(t, 0))
		}
		return isreflexive(t.Type)

	case TSTRUCT:
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if !isreflexive(t1.Type) {
				return false
			}
		}

		return true

	default:
		Fatal("bad type for map key: %v", Tconv(t, 0))
		return false
	}
}

// dtypesym emits the runtime type descriptor for t (once per type)
// and returns its symbol.
func dtypesym(t *Type) *Sym {
	// Replace byte, rune aliases with real type.
	// They've been separate internally to make error messages
	// better, but we have to merge them in the reflect tables.
	if t == bytetype || t == runetype {
		t = Types[t.Etype]
	}

	if isideal(t) {
		Fatal("dtypesym %v", Tconv(t, 0))
	}

	s := typesym(t)
	if s.Flags&SymSiggen != 0 {
		return s
	}
	s.Flags |= SymSiggen

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if Isptr[t.Etype] && t.Sym == nil && t.Type.Sym != nil {
		tbase = t.Type
	}
	dupok := 0
	if tbase.Sym == nil {
		dupok = obj.DUPOK
	}

	if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local {
		return s
	}
	if isforw[tbase.Etype] {
		return s
	}

ok:
	ot := 0
	xt := 0
	switch t.Etype {
	default:
		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr

	case TARRAY:
		if t.Bound >= 0 {
			// ../../runtime/type.go:/ArrayType
			s1 := dtypesym(t.Type)

			t2 := typ(TARRAY)
			t2.Type = t.Type
			t2.Bound = -1 // slice
			s2 := dtypesym(t2)
			ot = dcommontype(s, ot, t)
			xt = ot - 3*Widthptr
			ot = dsymptr(s, ot, s1, 0)
			ot = dsymptr(s, ot, s2, 0)
			ot = duintptr(s, ot, uint64(t.Bound))
		} else {
			// ../../runtime/type.go:/SliceType
			s1 := dtypesym(t.Type)

			ot = dcommontype(s, ot, t)
			xt = ot - 3*Widthptr
			ot = dsymptr(s, ot, s1, 0)
		}

	// ../../runtime/type.go:/ChanType
	case TCHAN:
		s1 := dtypesym(t.Type)

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s1, 0)
		ot = duintptr(s, ot, uint64(t.Chan))

	case TFUNC:
		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
		}
		isddd := false
		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
			isddd = t1.Isddd
			dtypesym(t1.Type)
		}

		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
		}

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = duint8(s, ot, uint8(bool2int(isddd)))

		// two slice headers: in and out.
		ot = int(Rnd(int64(ot), int64(Widthptr)))

		ot = dsymptr(s, ot, s, ot+2*(Widthptr+2*Widthint))
		n := t.Thistuple + t.Intuple
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = dsymptr(s, ot, s, ot+1*(Widthptr+2*Widthint)+n*Widthptr)
		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)

		// slice data
		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}
		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}
		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}

	case TINTER:
		m := imethods(t)
		n := 0
		for a := m; a != nil; a = a.link {
			dtypesym(a.type_)
			n++
		}

		// ../../runtime/type.go:/InterfaceType
		ot = dcommontype(s, ot, t)

		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		for a := m; a != nil; a = a.link {
			// ../../runtime/type.go:/imethod
			ot = dgostringptr(s, ot, a.name)

			ot = dgopkgpath(s, ot, a.pkg)
			ot = dsymptr(s, ot, dtypesym(a.type_), 0)
		}

	// ../../runtime/type.go:/MapType
	case TMAP:
		s1 := dtypesym(t.Down)

		s2 := dtypesym(t.Type)
		s3 := dtypesym(mapbucket(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s1, 0)
		ot = dsymptr(s, ot, s2, 0)
		ot = dsymptr(s, ot, s3, 0)
		ot = dsymptr(s, ot, s4, 0)
		// keysize / indirectkey
		if t.Down.Width > MAXKEYSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Down.Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		// valsize / indirectvalue
		if t.Type.Width > MAXVALSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Type.Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		ot = duint16(s, ot, uint16(mapbucket(t).Width))
		ot = duint8(s, ot, uint8(bool2int(isreflexive(t.Down))))

	case TPTR32, TPTR64:
		if t.Type.Etype == TANY {
			// ../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(s, ot, t)

			break
		}

		// ../../runtime/type.go:/PtrType
		s1 := dtypesym(t.Type)

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s1, 0)

	// ../../runtime/type.go:/StructType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			// ../../runtime/type.go:/structField
			if t1.Sym != nil && t1.Embedded == 0 {
				ot = dgostringptr(s, ot, t1.Sym.Name)
				if exportname(t1.Sym.Name) {
					ot = dgostringptr(s, ot, "")
				} else {
					ot = dgopkgpath(s, ot, t1.Sym.Pkg)
				}
			} else {
				ot = dgostringptr(s, ot, "")
				if t1.Type.Sym != nil && t1.Type.Sym.Pkg == builtinpkg {
					ot = dgopkgpath(s, ot, localpkg)
				} else {
					ot = dgostringptr(s, ot, "")
				}
			}

			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			ot = dgostrlitptr(s, ot, t1.Note)
			ot = duintptr(s, ot, uint64(t1.Width)) // field offset
		}
	}

	ot = dextratype(s, ot, t, xt)
	ggloblsym(s, int32(ot), int8(dupok|obj.RODATA))

	// generate typelink.foo pointing at s = type.foo.
	// The linker will leave a table of all the typelinks for
	// types in the binary, so reflect can find them.
	// We only need the link for unnamed composites that
	// we want to be able to find.
	if t.Sym == nil {
		switch t.Etype {
		case TARRAY, TCHAN, TFUNC, TMAP:
			slink := typelinksym(t)
			dsymptr(slink, 0, s, 0)
			ggloblsym(slink, int32(Widthptr), int8(dupok|obj.RODATA))
		}
	}

	return s
}

// dumptypestructs emits type descriptors for everything queued on
// signatlist and externdcl, plus import-path symbols and (when
// compiling package runtime) the basic types.
func dumptypestructs() {
	var n *Node

	// copy types from externdcl list to signatlist
	for l := externdcl; l != nil; l = l.Next {
		n = l.N
		if n.Op != OTYPE {
			continue
		}
		signatlist = list(signatlist, n)
	}

	// process signatlist
	var t *Type
	for l := signatlist; l != nil; l = l.Next {
		n = l.N
		if n.Op != OTYPE {
			continue
		}
		t = n.Type
		dtypesym(t)
		if t.Sym != nil {
			dtypesym(Ptrto(t))
		}
	}

	// generate import strings for imported packages
	for _, p := range pkgs {
		if p.Direct != 0 {
			dimportpath(p)
		}
	}

	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in .6 files.
	if compiling_runtime != 0 {
		for i := 1; i <= TBOOL; i++ {
			dtypesym(Ptrto(Types[i]))
		}
		dtypesym(Ptrto(Types[TSTRING]))
		dtypesym(Ptrto(Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(Ptrto(errortype))

		dtypesym(functype(nil, list1(Nod(ODCLFIELD, nil, typenod(errortype))), list1(Nod(ODCLFIELD, nil, typenod(Types[TSTRING])))))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race != 0 {
			dimportpath(racepkg)
		}
		dimportpath(mkpkg("main"))
	}
}

// dalgsym emits (and returns) the algorithm table symbol for t,
// containing its hash and equality function closures.
func dalgsym(t *Type) *Sym {
	var s *Sym
	var hashfunc *Sym
	var eqfunc *Sym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s = Pkglookup(p, typepkg)

		if s.Flags&SymAlgGen != 0 {
			return s
		}
		s.Flags |= SymAlgGen

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = Pkglookup(p, typepkg)

		ot := 0
		ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0)
		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = Pkglookup(p, typepkg)

		ot = 0
		ot = dsymptr(eqfunc, ot, Pkglookup("memequal_varlen", Runtimepkg), 0)
		ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s = typesymprefix(".alg", t)

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t)
		eqfunc = typesymprefix(".eqfunc", t)

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash, 0)

		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq, 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(s, ot, hashfunc, 0)
	ot = dsymptr(s, ot, eqfunc, 0)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// usegcprog reports whether type t should describe its pointers to
// the GC with a program rather than an unrolled bitmask.
func usegcprog(t *Type) bool {
	if !haspointers(t) {
		return false
	}
	if t.Width == BADWIDTH {
		dowidth(t)
	}

	// Calculate size of the unrolled GC mask.
	nptr := (t.Width + int64(Widthptr) - 1) / int64(Widthptr)

	size := nptr
	if size%2 != 0 {
		size *= 2 // repeated
	}
	size = size * obj.GcBits / 8 // 4 bits per word

	// Decide whether to use unrolled GC mask or GC program.
	// We could use a more elaborate condition, but this seems to work well in practice.
	// For small objects GC program can't give significant reduction.
	// While large objects usually contain arrays; and even if it doesn't
	// the program uses 2-bits per word while mask uses 4-bits per word,
	// so the program is still smaller.
	return size > int64(2*Widthptr)
}

// Generates sparse GC bitmask (4 bits per word).
func gengcmask(t *Type, gcmask []byte) {
	for i := int64(0); i < 16; i++ {
		gcmask[i] = 0
	}
	if !haspointers(t) {
		return
	}

	// Generate compact mask as stacks use.
	xoffset := int64(0)

	vec := bvalloc(2 * int32(Widthptr) * 8)
	twobitwalktype1(t, &xoffset, vec)

	// Unfold the mask for the GC bitmap format:
	// 4 bits per word, 2 high bits encode pointer info.
	pos := gcmask

	nptr := (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
	half := false

	// If number of words is odd, repeat the mask.
	// This makes simpler handling of arrays in runtime.
	var i int64
	var bits uint8
	for j := int64(0); j <= (nptr % 2); j++ {
		for i = 0; i < nptr; i++ {
			bits = uint8(bvget(vec, int32(i*obj.BitsPerPointer)) | bvget(vec, int32(i*obj.BitsPerPointer+1))<<1)

			// Some fake types (e.g. Hmap) have missing fields.
			// twobitwalktype1 generates BitsDead for those holes;
			// replace BitsDead with BitsScalar.
			if bits == obj.BitsDead {
				bits = obj.BitsScalar
			}
			bits <<= 2
			if half {
				bits <<= 4
			}
			pos[0] |= byte(bits)
			half = !half
			if !half {
				pos = pos[1:]
			}
		}
	}
}

// Helper object for generation of GC programs.
type ProgGen struct {
	s        *Sym
	datasize int32
	data     [256 / obj.PointersPerByte]uint8
	ot       int64
}

// proggeninit resets g to start emitting a GC program into symbol s.
func proggeninit(g *ProgGen, s *Sym) {
	g.s = s
	g.datasize = 0
	g.ot = 0
	g.data = [256 / obj.PointersPerByte]uint8{}
}

// proggenemit appends one byte v to the program being generated.
func proggenemit(g *ProgGen, v uint8) {
	g.ot = int64(duint8(g.s, int(g.ot), v))
}

// Emits insData block from g->data.
func proggendataflush(g *ProgGen) {
	if g.datasize == 0 {
		return
	}
	proggenemit(g, obj.InsData)
	proggenemit(g, uint8(g.datasize))
	s := (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
	for i := int32(0); i < s; i++ {
		proggenemit(g, g.data[i])
	}
	g.datasize = 0
	g.data = [256 / obj.PointersPerByte]uint8{}
}

// proggendata buffers one 2-bit pointer descriptor d, flushing the
// buffer as an insData block when it fills (255 entries).
func proggendata(g *ProgGen, d uint8) {
	g.data[g.datasize/obj.PointersPerByte] |= d << uint((g.datasize%obj.PointersPerByte)*obj.BitsPerPointer)
	g.datasize++
	if g.datasize == 255 {
		proggendataflush(g)
	}
}

// Skip v bytes due to alignment, etc.
func proggenskip(g *ProgGen, off int64, v int64) {
	for i := off; i < off+v; i++ {
		if (i % int64(Widthptr)) == 0 {
			proggendata(g, obj.BitsScalar)
		}
	}
}

// Emit insArray instruction.
1482 func proggenarray(g *ProgGen, len int64) { 1483 proggendataflush(g) 1484 proggenemit(g, obj.InsArray) 1485 for i := int32(0); i < int32(Widthptr); i, len = i+1, len>>8 { 1486 proggenemit(g, uint8(len)) 1487 } 1488 } 1489 1490 func proggenarrayend(g *ProgGen) { 1491 proggendataflush(g) 1492 proggenemit(g, obj.InsArrayEnd) 1493 } 1494 1495 func proggenfini(g *ProgGen) int64 { 1496 proggendataflush(g) 1497 proggenemit(g, obj.InsEnd) 1498 return g.ot 1499 } 1500 1501 // Generates GC program for large types. 1502 func gengcprog(t *Type, pgc0 **Sym, pgc1 **Sym) { 1503 nptr := (t.Width + int64(Widthptr) - 1) / int64(Widthptr) 1504 size := nptr 1505 if size%2 != 0 { 1506 size *= 2 // repeated twice 1507 } 1508 size = size * obj.PointersPerByte / 8 // 4 bits per word 1509 size++ // unroll flag in the beginning, used by runtime (see runtime.markallocated) 1510 1511 // emity space in BSS for unrolled program 1512 *pgc0 = nil 1513 1514 // Don't generate it if it's too large, runtime will unroll directly into GC bitmap. 1515 if size <= obj.MaxGCMask { 1516 gc0 := typesymprefix(".gc", t) 1517 ggloblsym(gc0, int32(size), obj.DUPOK|obj.NOPTR) 1518 *pgc0 = gc0 1519 } 1520 1521 // program in RODATA 1522 gc1 := typesymprefix(".gcprog", t) 1523 1524 var g ProgGen 1525 proggeninit(&g, gc1) 1526 xoffset := int64(0) 1527 gengcprog1(&g, t, &xoffset) 1528 ot := proggenfini(&g) 1529 ggloblsym(gc1, int32(ot), obj.DUPOK|obj.RODATA) 1530 *pgc1 = gc1 1531 } 1532 1533 // Recursively walks type t and writes GC program into g. 
1534 func gengcprog1(g *ProgGen, t *Type, xoffset *int64) { 1535 switch t.Etype { 1536 case TINT8, 1537 TUINT8, 1538 TINT16, 1539 TUINT16, 1540 TINT32, 1541 TUINT32, 1542 TINT64, 1543 TUINT64, 1544 TINT, 1545 TUINT, 1546 TUINTPTR, 1547 TBOOL, 1548 TFLOAT32, 1549 TFLOAT64, 1550 TCOMPLEX64, 1551 TCOMPLEX128: 1552 proggenskip(g, *xoffset, t.Width) 1553 *xoffset += t.Width 1554 1555 case TPTR32, 1556 TPTR64, 1557 TUNSAFEPTR, 1558 TFUNC, 1559 TCHAN, 1560 TMAP: 1561 proggendata(g, obj.BitsPointer) 1562 *xoffset += t.Width 1563 1564 case TSTRING: 1565 proggendata(g, obj.BitsPointer) 1566 proggendata(g, obj.BitsScalar) 1567 *xoffset += t.Width 1568 1569 // Assuming IfacePointerOnly=1. 1570 case TINTER: 1571 proggendata(g, obj.BitsPointer) 1572 1573 proggendata(g, obj.BitsPointer) 1574 *xoffset += t.Width 1575 1576 case TARRAY: 1577 if Isslice(t) { 1578 proggendata(g, obj.BitsPointer) 1579 proggendata(g, obj.BitsScalar) 1580 proggendata(g, obj.BitsScalar) 1581 } else { 1582 t1 := t.Type 1583 if t1.Width == 0 { 1584 } 1585 // ignore 1586 if t.Bound <= 1 || t.Bound*t1.Width < int64(32*Widthptr) { 1587 for i := int64(0); i < t.Bound; i++ { 1588 gengcprog1(g, t1, xoffset) 1589 } 1590 } else if !haspointers(t1) { 1591 n := t.Width 1592 n -= -*xoffset & (int64(Widthptr) - 1) // skip to next ptr boundary 1593 proggenarray(g, (n+int64(Widthptr)-1)/int64(Widthptr)) 1594 proggendata(g, obj.BitsScalar) 1595 proggenarrayend(g) 1596 *xoffset -= (n+int64(Widthptr)-1)/int64(Widthptr)*int64(Widthptr) - t.Width 1597 } else { 1598 proggenarray(g, t.Bound) 1599 gengcprog1(g, t1, xoffset) 1600 *xoffset += (t.Bound - 1) * t1.Width 1601 proggenarrayend(g) 1602 } 1603 } 1604 1605 case TSTRUCT: 1606 o := int64(0) 1607 var fieldoffset int64 1608 for t1 := t.Type; t1 != nil; t1 = t1.Down { 1609 fieldoffset = t1.Width 1610 proggenskip(g, *xoffset, fieldoffset-o) 1611 *xoffset += fieldoffset - o 1612 gengcprog1(g, t1.Type, xoffset) 1613 o = fieldoffset + t1.Type.Width 1614 } 1615 1616 proggenskip(g, 
*xoffset, t.Width-o) 1617 *xoffset += t.Width - o 1618 1619 default: 1620 Fatal("gengcprog1: unexpected type, %v", Tconv(t, 0)) 1621 } 1622 }