github.com/huandu/go@v0.0.0-20151114150818-04e615e41150/src/cmd/compile/internal/gc/reflect.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package gc 6 7 import ( 8 "cmd/internal/gcprog" 9 "cmd/internal/obj" 10 "fmt" 11 "os" 12 ) 13 14 /* 15 * runtime interface and reflection data structures 16 */ 17 var signatlist *NodeList 18 19 func sigcmp(a *Sig, b *Sig) int { 20 i := stringsCompare(a.name, b.name) 21 if i != 0 { 22 return i 23 } 24 if a.pkg == b.pkg { 25 return 0 26 } 27 if a.pkg == nil { 28 return -1 29 } 30 if b.pkg == nil { 31 return +1 32 } 33 return stringsCompare(a.pkg.Path, b.pkg.Path) 34 } 35 36 func lsort(l *Sig, f func(*Sig, *Sig) int) *Sig { 37 if l == nil || l.link == nil { 38 return l 39 } 40 41 l1 := l 42 l2 := l 43 for { 44 l2 = l2.link 45 if l2 == nil { 46 break 47 } 48 l2 = l2.link 49 if l2 == nil { 50 break 51 } 52 l1 = l1.link 53 } 54 55 l2 = l1.link 56 l1.link = nil 57 l1 = lsort(l, f) 58 l2 = lsort(l2, f) 59 60 /* set up lead element */ 61 if f(l1, l2) < 0 { 62 l = l1 63 l1 = l1.link 64 } else { 65 l = l2 66 l2 = l2.link 67 } 68 69 le := l 70 71 for { 72 if l1 == nil { 73 for l2 != nil { 74 le.link = l2 75 le = l2 76 l2 = l2.link 77 } 78 79 le.link = nil 80 break 81 } 82 83 if l2 == nil { 84 for l1 != nil { 85 le.link = l1 86 le = l1 87 l1 = l1.link 88 } 89 90 break 91 } 92 93 if f(l1, l2) < 0 { 94 le.link = l1 95 le = l1 96 l1 = l1.link 97 } else { 98 le.link = l2 99 le = l2 100 l2 = l2.link 101 } 102 } 103 104 le.link = nil 105 return l 106 } 107 108 // Builds a type representing a Bucket structure for 109 // the given map type. This type is not visible to users - 110 // we include only enough information to generate a correct GC 111 // program for it. 112 // Make sure this stays in sync with ../../runtime/hashmap.go! 
113 const ( 114 BUCKETSIZE = 8 115 MAXKEYSIZE = 128 116 MAXVALSIZE = 128 117 ) 118 119 func makefield(name string, t *Type) *Type { 120 f := typ(TFIELD) 121 f.Type = t 122 f.Sym = new(Sym) 123 f.Sym.Name = name 124 return f 125 } 126 127 func mapbucket(t *Type) *Type { 128 if t.Bucket != nil { 129 return t.Bucket 130 } 131 132 bucket := typ(TSTRUCT) 133 keytype := t.Down 134 valtype := t.Type 135 dowidth(keytype) 136 dowidth(valtype) 137 if keytype.Width > MAXKEYSIZE { 138 keytype = Ptrto(keytype) 139 } 140 if valtype.Width > MAXVALSIZE { 141 valtype = Ptrto(valtype) 142 } 143 144 // The first field is: uint8 topbits[BUCKETSIZE]. 145 arr := typ(TARRAY) 146 147 arr.Type = Types[TUINT8] 148 arr.Bound = BUCKETSIZE 149 field := make([]*Type, 0, 5) 150 field = append(field, makefield("topbits", arr)) 151 arr = typ(TARRAY) 152 arr.Type = keytype 153 arr.Bound = BUCKETSIZE 154 field = append(field, makefield("keys", arr)) 155 arr = typ(TARRAY) 156 arr.Type = valtype 157 arr.Bound = BUCKETSIZE 158 field = append(field, makefield("values", arr)) 159 160 // Make sure the overflow pointer is the last memory in the struct, 161 // because the runtime assumes it can use size-ptrSize as the 162 // offset of the overflow pointer. We double-check that property 163 // below once the offsets and size are computed. 164 // 165 // BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point. 166 // On 32-bit systems, the max alignment is 32-bit, and the 167 // overflow pointer will add another 32-bit field, and the struct 168 // will end with no padding. 169 // On 64-bit systems, the max alignment is 64-bit, and the 170 // overflow pointer will add another 64-bit field, and the struct 171 // will end with no padding. 172 // On nacl/amd64p32, however, the max alignment is 64-bit, 173 // but the overflow pointer will add only a 32-bit field, 174 // so if the struct needs 64-bit padding (because a key or value does) 175 // then it would end with an extra 32-bit padding field. 
176 // Preempt that by emitting the padding here. 177 if int(t.Type.Align) > Widthptr || int(t.Down.Align) > Widthptr { 178 field = append(field, makefield("pad", Types[TUINTPTR])) 179 } 180 181 // If keys and values have no pointers, the map implementation 182 // can keep a list of overflow pointers on the side so that 183 // buckets can be marked as having no pointers. 184 // Arrange for the bucket to have no pointers by changing 185 // the type of the overflow field to uintptr in this case. 186 // See comment on hmap.overflow in ../../../../runtime/hashmap.go. 187 otyp := Ptrto(bucket) 188 if !haspointers(t.Type) && !haspointers(t.Down) && t.Type.Width <= MAXKEYSIZE && t.Down.Width <= MAXVALSIZE { 189 otyp = Types[TUINTPTR] 190 } 191 ovf := makefield("overflow", otyp) 192 field = append(field, ovf) 193 194 // link up fields 195 bucket.Noalg = 1 196 bucket.Local = t.Local 197 bucket.Type = field[0] 198 for n := int32(0); n < int32(len(field)-1); n++ { 199 field[n].Down = field[n+1] 200 } 201 field[len(field)-1].Down = nil 202 dowidth(bucket) 203 204 // Double-check that overflow field is final memory in struct, 205 // with no padding at end. See comment above. 206 if ovf.Width != bucket.Width-int64(Widthptr) { 207 Yyerror("bad math in mapbucket for %v", t) 208 } 209 210 t.Bucket = bucket 211 212 bucket.Map = t 213 return bucket 214 } 215 216 // Builds a type representing a Hmap structure for the given map type. 217 // Make sure this stays in sync with ../../runtime/hashmap.go! 
// hmap builds (and caches on t.Hmap) the struct type that mirrors
// the runtime's hmap header for the map type t. Field order and
// widths must byte-match runtime/hashmap.go's hmap.
func hmap(t *Type) *Type {
	if t.Hmap != nil {
		return t.Hmap
	}

	bucket := mapbucket(t)
	var field [8]*Type
	field[0] = makefield("count", Types[TINT])
	field[1] = makefield("flags", Types[TUINT8])
	field[2] = makefield("B", Types[TUINT8])
	field[3] = makefield("hash0", Types[TUINT32])
	field[4] = makefield("buckets", Ptrto(bucket))
	field[5] = makefield("oldbuckets", Ptrto(bucket))
	field[6] = makefield("nevacuate", Types[TUINTPTR])
	field[7] = makefield("overflow", Types[TUNSAFEPTR])

	h := typ(TSTRUCT)
	h.Noalg = 1 // no hash/eq algorithm table for this synthetic type
	h.Local = t.Local
	// Chain the fields into the struct via the Type/Down links.
	h.Type = field[0]
	for n := int32(0); n < int32(len(field)-1); n++ {
		field[n].Down = field[n+1]
	}
	field[len(field)-1].Down = nil
	dowidth(h)
	t.Hmap = h
	h.Map = t
	return h
}

// hiter builds (and caches on t.Hiter) the struct type that mirrors
// the runtime's map iterator for the map type t.
func hiter(t *Type) *Type {
	if t.Hiter != nil {
		return t.Hiter
	}

	// build a struct:
	// hash_iter {
	//    key *Key
	//    val *Value
	//    t *MapType
	//    h *Hmap
	//    buckets *Bucket
	//    bptr *Bucket
	//    overflow0 unsafe.Pointer
	//    overflow1 unsafe.Pointer
	//    startBucket uintptr
	//    stuff uintptr
	//    bucket uintptr
	//    checkBucket uintptr
	// }
	// must match ../../runtime/hashmap.go:hash_iter.
	var field [12]*Type
	field[0] = makefield("key", Ptrto(t.Down))

	field[1] = makefield("val", Ptrto(t.Type))
	field[2] = makefield("t", Ptrto(Types[TUINT8]))
	field[3] = makefield("h", Ptrto(hmap(t)))
	field[4] = makefield("buckets", Ptrto(mapbucket(t)))
	field[5] = makefield("bptr", Ptrto(mapbucket(t)))
	field[6] = makefield("overflow0", Types[TUNSAFEPTR])
	field[7] = makefield("overflow1", Types[TUNSAFEPTR])
	field[8] = makefield("startBucket", Types[TUINTPTR])
	field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
	field[10] = makefield("bucket", Types[TUINTPTR])
	field[11] = makefield("checkBucket", Types[TUINTPTR])

	// build iterator struct holding the above fields
	i := typ(TSTRUCT)

	i.Noalg = 1
	i.Type = field[0]
	for n := int32(0); n < int32(len(field)-1); n++ {
		field[n].Down = field[n+1]
	}
	field[len(field)-1].Down = nil
	dowidth(i)
	// Every field is pointer-sized, so the total must be 12 words.
	if i.Width != int64(12*Widthptr) {
		Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
	}
	t.Hiter = i
	i.Map = t
	return i
}

/*
 * f is method type, with receiver.
 * return function type, receiver as first argument (or not).
 */
func methodfunc(f *Type, receiver *Type) *Type {
	// Build the input parameter list; the receiver, if requested,
	// becomes an ordinary leading parameter.
	var in *NodeList
	if receiver != nil {
		d := Nod(ODCLFIELD, nil, nil)
		d.Type = receiver
		in = list(in, d)
	}

	var d *Node
	for t := getinargx(f).Type; t != nil; t = t.Down {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		d.Isddd = t.Isddd // preserve variadic-ness of the last parameter
		in = list(in, d)
	}

	var out *NodeList
	for t := getoutargx(f).Type; t != nil; t = t.Down {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		out = list(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname != nil {
		// Link to name of original method function.
		t.Nname = f.Nname
	}

	return t
}

/*
 * return methods of non-interface type t, sorted by name.
 * generates stub functions as needed.
 */
func methods(t *Type) *Sig {
	// method type
	mt := methtype(t, 0)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = Ptrto(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var a *Sig

	var this *Type
	var b *Sig
	var method *Sym
	for f := mt.Xmethod; f != nil; f = f.Down {
		if f.Etype != TFIELD {
			Fatal("methods: not field %v", f)
		}
		if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
			Fatal("non-method on %v method %v %v\n", mt, f.Sym, f)
		}
		if getthisx(f.Type).Type == nil {
			Fatal("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Nointerface {
			continue
		}

		method = f.Sym
		if method == nil {
			continue
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		this = getthisx(f.Type).Type.Type

		if Isptr[this.Etype] && this.Type == t {
			continue
		}
		if Isptr[this.Etype] && !Isptr[t.Etype] && f.Embedded != 2 && !isifacemethod(f.Type) {
			continue
		}

		// Prepend a new Sig to the list (sorted later by lsort).
		b = new(Sig)
		b.link = a
		a = b

		a.name = method.Name
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatal("methods: missing package")
			}
			a.pkg = method.Pkg
		}

		a.isym = methodsym(method, it, 1)
		a.tsym = methodsym(method, t, 0)
		a.type_ = methodfunc(f.Type, t)
		a.mtype = methodfunc(f.Type, nil)

		// Generate the interface-call wrapper once per symbol.
		if a.isym.Flags&SymSiggen == 0 {
			a.isym.Flags |= SymSiggen
			if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
				compiling_wrappers = 1
				genwrapper(it, f, a.isym, 1)
				compiling_wrappers = 0
			}
		}

		// Likewise for the value-receiver wrapper.
		if a.tsym.Flags&SymSiggen == 0 {
			a.tsym.Flags |= SymSiggen
			if !Eqtype(this, t) {
				compiling_wrappers = 1
				genwrapper(t, f, a.tsym, 0)
				compiling_wrappers = 0
			}
		}
	}

	return lsort(a, sigcmp)
}

/*
 * return methods of interface type t, sorted by name.
 */
func imethods(t *Type) *Sig {
	var a *Sig
	var method *Sym
	var isym *Sym

	var all *Sig
	var last *Sig
	for f := t.Type; f != nil; f = f.Down {
		if f.Etype != TFIELD {
			Fatal("imethods: not field")
		}
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		method = f.Sym
		a = new(Sig)
		a.name = method.Name
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatal("imethods: missing package")
			}
			a.pkg = method.Pkg
		}

		a.mtype = f.Type
		a.offset = 0
		a.type_ = methodfunc(f.Type, nil)

		// Interface methods are expected already sorted
		// (by sortinter); verify rather than re-sort.
		if last != nil && sigcmp(last, a) >= 0 {
			Fatal("sigcmp vs sortinter %s %s", last.name, a.name)
		}
		if last == nil {
			all = a
		} else {
			last.link = a
		}
		last = a

		// Compiler can only refer to wrappers for non-blank methods.
		if isblanksym(method) {
			continue
		}

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym = methodsym(method, t, 0)

		if isym.Flags&SymSiggen == 0 {
			isym.Flags |= SymSiggen
			genwrapper(t, f, isym, 0)
		}
	}

	return all
}

// Cached "go" pseudo-package used for importpath symbols.
var dimportpath_gopkg *Pkg

// dimportpath emits (once per package) the go.importpath symbol
// holding package p's import path string, caching it on p.Pathsym.
func dimportpath(p *Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	if dimportpath_gopkg == nil {
		dimportpath_gopkg = mkpkg("go")
		dimportpath_gopkg.Name = "go"
	}

	nam := "importpath." + p.Prefix + "."

	n := Nod(ONAME, nil, nil)
	n.Sym = Pkglookup(nam, dimportpath_gopkg)

	n.Class = PEXTERN
	n.Xoffset = 0
	p.Pathsym = n.Sym

	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
		gdatastring(n, myimportpath)
	} else {
		gdatastring(n, p.Path)
	}
	ggloblsym(n.Sym, int32(Types[TSTRING].Width), obj.DUPOK|obj.RODATA)
}

// dgopkgpath writes a pointer to pkg's import path string at offset
// ot in symbol s and returns the next offset.
func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
	if pkg == nil {
		return dgostringptr(s, ot, "")
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full path of the package being compiled (i.e. -p
		// was not passed on the compiler command line), emit reference to
		// go.importpath.""., which 6l will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// NOTE(review): ns is freshly declared, so the nil check below is
		// always true — it looks like a leftover from converting a
		// C function-static cache; harmless but collapsible.
		var ns *Sym

		if ns == nil {
			ns = Pkglookup("importpath.\"\".", mkpkg("go"))
		}
		return dsymptr(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptr(s, ot, pkg.Pathsym, 0)
}

/*
 * uncommonType
 * ../../runtime/type.go:/uncommonType
 */
func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
	m := methods(t)
	// No name and no methods: no uncommonType record needed.
	if t.Sym == nil && m == nil {
		return off
	}

	// fill in *extraType pointer in header
	off = int(Rnd(int64(off), int64(Widthptr)))

	dsymptr(sym, ptroff, sym, off)

	// Emit the method types first and count the methods.
	n := 0
	for a := m; a != nil; a = a.link {
		dtypesym(a.type_)
		n++
	}

	ot := off
	s := sym
	// name and package path (empty strings when absent)
	if t.Sym != nil {
		ot = dgostringptr(s, ot, t.Sym.Name)
		if t != Types[t.Etype] && t != errortype {
			ot = dgopkgpath(s, ot, t.Sym.Pkg)
		} else {
			ot = dgostringptr(s, ot, "")
		}
	} else {
		ot = dgostringptr(s, ot, "")
		ot = dgostringptr(s, ot, "")
	}

	// slice header
	ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)

	ot = duintxx(s, ot, uint64(n), Widthint)
	ot = duintxx(s, ot, uint64(n), Widthint)

	// methods
	for a := m; a != nil; a = a.link {
		// method
		// ../../runtime/type.go:/method
		ot = dgostringptr(s, ot, a.name)

		ot = dgopkgpath(s, ot, a.pkg)
		ot = dsymptr(s, ot, dtypesym(a.mtype), 0)
		ot = dsymptr(s, ot, dtypesym(a.type_), 0)
		if a.isym != nil {
			ot = dsymptr(s, ot, a.isym, 0)
		} else {
			ot = duintptr(s, ot, 0)
		}
		if a.tsym != nil {
			ot = dsymptr(s, ot, a.tsym, 0)
		} else {
			ot = duintptr(s, ot, 0)
		}
	}

	return ot
}

// kinds maps compiler Etype values to the runtime's reflect kind
// constants (see cmd/internal/obj kind definitions).
var kinds = []int{
	TINT:        obj.KindInt,
	TUINT:       obj.KindUint,
	TINT8:       obj.KindInt8,
	TUINT8:      obj.KindUint8,
	TINT16:      obj.KindInt16,
	TUINT16:     obj.KindUint16,
	TINT32:      obj.KindInt32,
	TUINT32:     obj.KindUint32,
	TINT64:      obj.KindInt64,
	TUINT64:     obj.KindUint64,
	TUINTPTR:    obj.KindUintptr,
	TFLOAT32:    obj.KindFloat32,
	TFLOAT64:    obj.KindFloat64,
	TBOOL:       obj.KindBool,
	TSTRING:     obj.KindString,
	TPTR32:      obj.KindPtr,
	TPTR64:      obj.KindPtr,
	TSTRUCT:     obj.KindStruct,
	TINTER:      obj.KindInterface,
	TCHAN:       obj.KindChan,
	TMAP:        obj.KindMap,
	TARRAY:      obj.KindArray,
	TFUNC:       obj.KindFunc,
	TCOMPLEX64:  obj.KindComplex64,
	TCOMPLEX128: obj.KindComplex128,
	TUNSAFEPTR:  obj.KindUnsafePointer,
}

// haspointers reports whether values of type t contain pointers,
// memoizing the answer in t.Haspointers (0 = unknown, 1 = no, 2 = yes).
func haspointers(t *Type) bool {
	if t.Haspointers != 0 {
		return t.Haspointers-1 != 0
	}

	var ret bool
	switch t.Etype {
	case TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TBOOL:
		ret = false

	case TARRAY:
		if t.Bound < 0 { // slice
			ret = true
			break
		}

		if t.Bound == 0 { // empty array
			ret = false
			break
		}

		ret = haspointers(t.Type)

	case TSTRUCT:
		ret = false
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if haspointers(t1.Type) {
				ret = true
				break
			}
		}

	case TSTRING,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TINTER,
		TCHAN,
		TMAP,
		TFUNC:
		fallthrough
	default:
		ret = true

	case TFIELD:
		Fatal("haspointers: unexpected type, %v", t)
	}

	// Cache: 1 means "no pointers", 2 means "has pointers".
	t.Haspointers = 1 + uint8(obj.Bool2int(ret))
	return ret
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *Type) int64 {
	if !haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TARRAY:
		if Isslice(t) {
			// struct { byte *array; uintgo len; uintgo cap; }
			return int64(Widthptr)
		}
		// haspointers already eliminated t.Bound == 0.
		return (t.Bound-1)*t.Type.Width + typeptrdata(t.Type)

	case TSTRUCT:
		// Find the last field that has pointers.
		var lastPtrField *Type
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		// For a TFIELD, Width is the field's offset within the struct
		// (see the "field offset" emission in dtypesym's TSTRUCT case).
		return lastPtrField.Width + typeptrdata(lastPtrField.Type)

	default:
		Fatal("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

/*
 * commonType
 * ../../runtime/type.go:/commonType
 */

// Cached runtime.algarray symbol shared by all calls to dcommontype.
var dcommontype_algarray *Sym

// dcommontype writes the commonType header for t into symbol s
// starting at offset ot (which must be 0) and returns the next
// offset. The field order must match runtime/type.go:commonType.
func dcommontype(s *Sym, ot int, t *Type) int {
	if ot != 0 {
		Fatal("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if dcommontype_algarray == nil {
		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *Sym
	if alg < 0 || alg == AMEM {
		algsym = dalgsym(t)
	}

	// Emit (or weakly reference) the *T type descriptor for ptrToThis.
	var sptr *Sym
	tptr := Ptrto(t)
	if !Isptr[t.Etype] && (t.Sym != nil || methods(tptr) != nil) {
		sptr = dtypesym(tptr)
	} else {
		sptr = weaktypesym(tptr)
	}

	// All (non-reflect-allocated) Types share the same zero object.
	// Each place in the compiler where a pointer to the zero object
	// might be returned by a runtime call (map access return value,
	// 2-arg type cast) declares the size of the zerovalue it needs.
	// The linker magically takes the max of all the sizes.
	zero := Pkglookup("zerovalue", Runtimepkg)

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// We use size 0 here so we get the pointer to the zero value,
	// but don't allocate space for the zero value unless we need it.
	// TODO: how do we get this symbol into bss?  We really want
	// a read-only bss, but I don't think such a thing exists.

	// ../../pkg/reflect/type.go:/^type.commonType
	// actual type structure
	//	type commonType struct {
	//		size          uintptr
	//		ptrsize       uintptr
	//		hash          uint32
	//		_             uint8
	//		align         uint8
	//		fieldAlign    uint8
	//		kind          uint8
	//		alg           unsafe.Pointer
	//		gcdata        unsafe.Pointer
	//		string        *string
	//		*extraType
	//		ptrToThis     *Type
	//		zero          unsafe.Pointer
	//	}
	ot = duintptr(s, ot, uint64(t.Width))
	ot = duintptr(s, ot, uint64(ptrdata))

	ot = duint32(s, ot, typehash(t))
	ot = duint8(s, ot, 0) // unused

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatal("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(s, ot, t.Align) // align
	ot = duint8(s, ot, t.Align) // fieldAlign

	// kind byte: base kind plus flag bits.
	i = kinds[t.Etype]
	if t.Etype == TARRAY && t.Bound < 0 {
		i = obj.KindSlice
	}
	if !haspointers(t) {
		i |= obj.KindNoPointers
	}
	if isdirectiface(t) {
		i |= obj.KindDirectIface
	}
	if useGCProg {
		i |= obj.KindGCProg
	}
	ot = duint8(s, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(s, ot, dcommontype_algarray, alg*sizeofAlg)
	} else {
		ot = dsymptr(s, ot, algsym, 0)
	}
	ot = dsymptr(s, ot, gcsym, 0)

	p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned)

	//print("dcommontype: %s\n", p);
	ot = dgostringptr(s, ot, p) // string

	// skip pointer to extraType,
	// which follows the rest of this type structure.
	// caller will fill in if needed.
	// otherwise linker will assume 0.
	ot += Widthptr

	ot = dsymptr(s, ot, sptr, 0) // ptrto type
	ot = dsymptr(s, ot, zero, 0) // ptr to zero value
	return ot
}

// typesym returns the type-descriptor symbol for t.
func typesym(t *Type) *Sym {
	return Pkglookup(Tconv(t, obj.FmtLeft), typepkg)
}

// tracksym returns the field-tracking symbol for field t.
func tracksym(t *Type) *Sym {
	return Pkglookup(Tconv(t.Outer, obj.FmtLeft)+"."+t.Sym.Name, trackpkg)
}

// typelinksym returns the typelink symbol for t.
func typelinksym(t *Type) *Sym {
	// %-uT is what the generated Type's string field says.
	// It uses (ambiguous) package names instead of import paths.
	// %-T is the complete, unambiguous type name.
	// We want the types to end up sorted by string field,
	// so use that first in the name, and then add :%-T to
	// disambiguate. We use a tab character as the separator to
	// ensure the types appear sorted by their string field. The
	// names are a little long but they are discarded by the linker
	// and do not end up in the symbol table of the final binary.
	p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned) + "\t" + Tconv(t, obj.FmtLeft)

	s := Pkglookup(p, typelinkpkg)

	//print("typelinksym: %s -> %+S\n", p, s);

	return s
}

// typesymprefix returns the symbol "prefix.<type name>" in the type
// package (used for .hash/.eq/.alg helper symbols).
func typesymprefix(prefix string, t *Type) *Sym {
	p := prefix + "." + Tconv(t, obj.FmtLeft)
	s := Pkglookup(p, typepkg)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

// typenamesym returns the symbol for t's type descriptor, creating
// the backing ONAME node and queuing t on signatlist on first use.
func typenamesym(t *Type) *Sym {
	if t == nil || (Isptr[t.Etype] && t.Type == nil) || isideal(t) {
		Fatal("typename %v", t)
	}
	s := typesym(t)
	if s.Def == nil {
		n := Nod(ONAME, nil, nil)
		n.Sym = s
		n.Type = Types[TUINT8]
		n.Addable = true
		n.Ullman = 1
		n.Class = PEXTERN
		n.Xoffset = 0
		n.Typecheck = 1
		s.Def = n

		signatlist = list(signatlist, typenod(t))
	}

	return s.Def.Sym
}

// typename returns an OADDR node referring to t's type descriptor.
func typename(t *Type) *Node {
	s := typenamesym(t)
	n := Nod(OADDR, s.Def, nil)
	n.Type = Ptrto(s.Def.Type)
	n.Addable = true
	n.Ullman = 2
	n.Typecheck = 1
	return n
}

// weaktypesym returns a weak reference symbol for t's descriptor.
func weaktypesym(t *Type) *Sym {
	p := Tconv(t, obj.FmtLeft)
	s := Pkglookup(p, weaktypepkg)

	//print("weaktypesym: %s -> %+S\n", p, s);

	return s
}

/*
 * Returns 1 if t has a reflexive equality operator.
 * That is, if x==x for all x of type t.
 */
func isreflexive(t *Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TSTRING,
		TCHAN:
		return true

	case TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER:
		// Floating point: NaN != NaN, so not reflexive
		// (interfaces may contain floats).
		return false

	case TARRAY:
		if Isslice(t) {
			Fatal("slice can't be a map key: %v", t)
		}
		return isreflexive(t.Type)

	case TSTRUCT:
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if !isreflexive(t1.Type) {
				return false
			}
		}

		return true

	default:
		Fatal("bad type for map key: %v", t)
		return false
	}
}

// dtypesym emits the full reflect type descriptor for t (header plus
// kind-specific payload plus uncommonType) and returns its symbol.
// Emission is idempotent via the SymSiggen flag.
func dtypesym(t *Type) *Sym {
	// Replace byte, rune aliases with real type.
	// They've been separate internally to make error messages
	// better, but we have to merge them in the reflect tables.
	if t == bytetype || t == runetype {
		t = Types[t.Etype]
	}

	if isideal(t) {
		Fatal("dtypesym %v", t)
	}

	s := typesym(t)
	if s.Flags&SymSiggen != 0 {
		return s
	}
	s.Flags |= SymSiggen

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if Isptr[t.Etype] && t.Sym == nil && t.Type.Sym != nil {
		tbase = t.Type
	}
	dupok := 0
	if tbase.Sym == nil {
		dupok = obj.DUPOK
	}

	if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local {
		return s
	}
	if isforw[tbase.Etype] {
		return s
	}

ok:
	ot := 0
	xt := 0
	// In every case, xt records the offset of the *extraType slot
	// (3 words back from the end of the commonType header), to be
	// filled in by dextratype below.
	switch t.Etype {
	default:
		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr

	case TARRAY:
		if t.Bound >= 0 {
			// ../../runtime/type.go:/ArrayType
			s1 := dtypesym(t.Type)

			t2 := typ(TARRAY)
			t2.Type = t.Type
			t2.Bound = -1 // slice
			s2 := dtypesym(t2)
			ot = dcommontype(s, ot, t)
			xt = ot - 3*Widthptr
			ot = dsymptr(s, ot, s1, 0)
			ot = dsymptr(s, ot, s2, 0)
			ot = duintptr(s, ot, uint64(t.Bound))
		} else {
			// ../../runtime/type.go:/SliceType
			s1 := dtypesym(t.Type)

			ot = dcommontype(s, ot, t)
			xt = ot - 3*Widthptr
			ot = dsymptr(s, ot, s1, 0)
		}

	// ../../runtime/type.go:/ChanType
	case TCHAN:
		s1 := dtypesym(t.Type)

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s1, 0)
		ot = duintptr(s, ot, uint64(t.Chan))

	case TFUNC:
		// Emit descriptors for receiver, params, and results first.
		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
		}
		isddd := false
		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
			isddd = t1.Isddd
			dtypesym(t1.Type)
		}

		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
		}

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = duint8(s, ot, uint8(obj.Bool2int(isddd)))

		// two slice headers: in and out.
		ot = int(Rnd(int64(ot), int64(Widthptr)))

		ot = dsymptr(s, ot, s, ot+2*(Widthptr+2*Widthint))
		n := t.Thistuple + t.Intuple
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = dsymptr(s, ot, s, ot+1*(Widthptr+2*Widthint)+n*Widthptr)
		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)

		// slice data
		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}
		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}
		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}

	case TINTER:
		m := imethods(t)
		n := 0
		for a := m; a != nil; a = a.link {
			dtypesym(a.type_)
			n++
		}

		// ../../runtime/type.go:/InterfaceType
		ot = dcommontype(s, ot, t)

		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		for a := m; a != nil; a = a.link {
			// ../../runtime/type.go:/imethod
			ot = dgostringptr(s, ot, a.name)

			ot = dgopkgpath(s, ot, a.pkg)
			ot = dsymptr(s, ot, dtypesym(a.type_), 0)
		}

	// ../../runtime/type.go:/MapType
	case TMAP:
		s1 := dtypesym(t.Down)

		s2 := dtypesym(t.Type)
		s3 := dtypesym(mapbucket(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s1, 0)
		ot = dsymptr(s, ot, s2, 0)
		ot = dsymptr(s, ot, s3, 0)
		ot = dsymptr(s, ot, s4, 0)
		// key size / indirection flag (must agree with mapbucket)
		if t.Down.Width > MAXKEYSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Down.Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		// value size / indirection flag
		if t.Type.Width > MAXVALSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Type.Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		ot = duint16(s, ot, uint16(mapbucket(t).Width))
		ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down))))

	case TPTR32, TPTR64:
		if t.Type.Etype == TANY {
			// ../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(s, ot, t)

			break
		}

		// ../../runtime/type.go:/PtrType
		s1 := dtypesym(t.Type)

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s1, 0)

	// ../../runtime/type.go:/StructType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			// ../../runtime/type.go:/structField
			if t1.Sym != nil && t1.Embedded == 0 {
				ot = dgostringptr(s, ot, t1.Sym.Name)
				if exportname(t1.Sym.Name) {
					ot = dgostringptr(s, ot, "")
				} else {
					ot = dgopkgpath(s, ot, t1.Sym.Pkg)
				}
			} else {
				ot = dgostringptr(s, ot, "")
				if t1.Type.Sym != nil && t1.Type.Sym.Pkg == builtinpkg {
					ot = dgopkgpath(s, ot, localpkg)
				} else {
					ot = dgostringptr(s, ot, "")
				}
			}

			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			ot = dgostrlitptr(s, ot, t1.Note)
			ot = duintptr(s, ot, uint64(t1.Width)) // field offset
		}
	}

	ot = dextratype(s, ot, t, xt)
	ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))

	// generate typelink.foo pointing at s = type.foo.
	// The linker will leave a table of all the typelinks for
	// types in the binary, so reflect can find them.
	// We only need the link for unnamed composites that
	// we want be able to find.
	if t.Sym == nil {
		switch t.Etype {
		case TPTR32, TPTR64:
			// The ptrto field of the type data cannot be relied on when
			// dynamic linking: a type T may be defined in a module that makes
			// no use of pointers to that type, but another module can contain
			// a package that imports the first one and does use *T pointers.
			// The second module will end up defining type data for *T and a
			// type.*T symbol pointing at it. It's important that calling
			// .PtrTo() on the reflect.Type for T returns this type data and
			// not some synthesized object, so we need reflect to be able to
			// find it!
			if !Ctxt.Flag_dynlink {
				break
			}
			fallthrough
		case TARRAY, TCHAN, TFUNC, TMAP:
			slink := typelinksym(t)
			dsymptr(slink, 0, s, 0)
			ggloblsym(slink, int32(Widthptr), int16(dupok|obj.RODATA))
		}
	}

	return s
}

// dumptypestructs emits all queued type descriptors and package
// import-path symbols at the end of compilation.
func dumptypestructs() {
	var n *Node

	// copy types from externdcl list to signatlist
	for l := externdcl; l != nil; l = l.Next {
		n = l.N
		if n.Op != OTYPE {
			continue
		}
		signatlist = list(signatlist, n)
	}

	// process signatlist
	var t *Type
	for l := signatlist; l != nil; l = l.Next {
		n = l.N
		if n.Op != OTYPE {
			continue
		}
		t = n.Type
		dtypesym(t)
		if t.Sym != nil {
			dtypesym(Ptrto(t))
		}
	}

	// generate import strings for imported packages
	for _, p := range pkgs {
		if p.Direct != 0 {
			dimportpath(p)
		}
	}

	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in .6 files.
	if compiling_runtime != 0 {
		for i := 1; i <= TBOOL; i++ {
			dtypesym(Ptrto(Types[i]))
		}
		dtypesym(Ptrto(Types[TSTRING]))
		dtypesym(Ptrto(Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(Ptrto(errortype))

		dtypesym(functype(nil, list1(Nod(ODCLFIELD, nil, typenod(errortype))), list1(Nod(ODCLFIELD, nil, typenod(Types[TSTRING])))))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race != 0 {
			dimportpath(racepkg)
		}
		dimportpath(mkpkg("main"))
	}
}

// dalgsym emits (or returns the cached) algorithm-table symbol for t,
// along with hash/equal closure symbols the runtime calls.
func dalgsym(t *Type) *Sym {
	var s *Sym
	var hashfunc *Sym
	var eqfunc *Sym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s = Pkglookup(p, typepkg)

		if s.Flags&SymAlgGen != 0 {
			return s
		}
		s.Flags |= SymAlgGen

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = Pkglookup(p, typepkg)

		ot := 0
		ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0)
		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = Pkglookup(p, typepkg)

		ot = 0
		ot = dsymptr(eqfunc, ot, Pkglookup("memequal_varlen", Runtimepkg), 0)
		ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s = typesymprefix(".alg", t)

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t)
		eqfunc = typesymprefix(".eqfunc", t)

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash, 0)

		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq, 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../runtime/alg.go:/typeAlg
	// NOTE(review): SOURCE is truncated here mid-statement; the
	// remainder of dalgsym is not visible in this view.
	ot := 0

	ot
= dsymptr(s, ot, hashfunc, 0) 1406 ot = dsymptr(s, ot, eqfunc, 0) 1407 ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) 1408 return s 1409 } 1410 1411 // maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap, 1412 // which holds 1-bit entries describing where pointers are in a given type. 1413 // 16 bytes is enough to describe 128 pointer-sized words, 512 or 1024 bytes 1414 // depending on the system. Above this length, the GC information is 1415 // recorded as a GC program, which can express repetition compactly. 1416 // In either form, the information is used by the runtime to initialize the 1417 // heap bitmap, and for large types (like 128 or more words), they are 1418 // roughly the same speed. GC programs are never much larger and often 1419 // more compact. (If large arrays are involved, they can be arbitrarily more 1420 // compact.) 1421 // 1422 // The cutoff must be large enough that any allocation large enough to 1423 // use a GC program is large enough that it does not share heap bitmap 1424 // bytes with any other objects, allowing the GC program execution to 1425 // assume an aligned start and not use atomic operations. In the current 1426 // runtime, this means all malloc size classes larger than the cutoff must 1427 // be multiples of four words. On 32-bit systems that's 16 bytes, and 1428 // all size classes >= 16 bytes are 16-byte aligned, so no real constraint. 1429 // On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed 1430 // for size classes >= 256 bytes. On a 64-bit sytem, 256 bytes allocated 1431 // is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes 1432 // must be >= 4. 1433 // 1434 // We used to use 16 because the GC programs do have some constant overhead 1435 // to get started, and processing 128 pointers seems to be enough to 1436 // amortize that overhead well. 
1437 // 1438 // To make sure that the runtime's chansend can call typeBitsBulkBarrier, 1439 // we raised the limit to 2048, so that even 32-bit systems are guaranteed to 1440 // use bitmaps for objects up to 64 kB in size. 1441 // 1442 // Also known to reflect/type.go. 1443 // 1444 const maxPtrmaskBytes = 2048 1445 1446 // dgcsym emits and returns a data symbol containing GC information for type t, 1447 // along with a boolean reporting whether the UseGCProg bit should be set in 1448 // the type kind, and the ptrdata field to record in the reflect type information. 1449 func dgcsym(t *Type) (sym *Sym, useGCProg bool, ptrdata int64) { 1450 ptrdata = typeptrdata(t) 1451 if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 { 1452 sym = dgcptrmask(t) 1453 return 1454 } 1455 1456 useGCProg = true 1457 sym, ptrdata = dgcprog(t) 1458 return 1459 } 1460 1461 // dgcptrmask emits and returns the symbol containing a pointer mask for type t. 1462 func dgcptrmask(t *Type) *Sym { 1463 ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8) 1464 fillptrmask(t, ptrmask) 1465 p := fmt.Sprintf("gcbits.%x", ptrmask) 1466 1467 sym := Pkglookup(p, Runtimepkg) 1468 if sym.Flags&SymUniq == 0 { 1469 sym.Flags |= SymUniq 1470 for i, x := range ptrmask { 1471 duint8(sym, i, x) 1472 } 1473 ggloblsym(sym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL) 1474 } 1475 return sym 1476 } 1477 1478 // fillptrmask fills in ptrmask with 1s corresponding to the 1479 // word offsets in t that hold pointers. 1480 // ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits. 
1481 func fillptrmask(t *Type, ptrmask []byte) { 1482 for i := range ptrmask { 1483 ptrmask[i] = 0 1484 } 1485 if !haspointers(t) { 1486 return 1487 } 1488 1489 vec := bvalloc(8 * int32(len(ptrmask))) 1490 xoffset := int64(0) 1491 onebitwalktype1(t, &xoffset, vec) 1492 1493 nptr := typeptrdata(t) / int64(Widthptr) 1494 for i := int64(0); i < nptr; i++ { 1495 if bvget(vec, int32(i)) == 1 { 1496 ptrmask[i/8] |= 1 << (uint(i) % 8) 1497 } 1498 } 1499 } 1500 1501 // dgcprog emits and returns the symbol containing a GC program for type t 1502 // along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]). 1503 // In practice, the size is typeptrdata(t) except for non-trivial arrays. 1504 // For non-trivial arrays, the program describes the full t.Width size. 1505 func dgcprog(t *Type) (*Sym, int64) { 1506 dowidth(t) 1507 if t.Width == BADWIDTH { 1508 Fatal("dgcprog: %v badwidth", t) 1509 } 1510 sym := typesymprefix(".gcprog", t) 1511 var p GCProg 1512 p.init(sym) 1513 p.emit(t, 0) 1514 offset := p.w.BitIndex() * int64(Widthptr) 1515 p.end() 1516 if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width { 1517 Fatal("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width) 1518 } 1519 return sym, offset 1520 } 1521 1522 type GCProg struct { 1523 sym *Sym 1524 symoff int 1525 w gcprog.Writer 1526 } 1527 1528 var Debug_gcprog int // set by -d gcprog 1529 1530 func (p *GCProg) init(sym *Sym) { 1531 p.sym = sym 1532 p.symoff = 4 // first 4 bytes hold program length 1533 p.w.Init(p.writeByte) 1534 if Debug_gcprog > 0 { 1535 fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", sym) 1536 p.w.Debug(os.Stderr) 1537 } 1538 } 1539 1540 func (p *GCProg) writeByte(x byte) { 1541 p.symoff = duint8(p.sym, p.symoff, x) 1542 } 1543 1544 func (p *GCProg) end() { 1545 p.w.End() 1546 duint32(p.sym, 0, uint32(p.symoff-4)) 1547 ggloblsym(p.sym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL) 1548 if Debug_gcprog > 0 { 1549 
fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.sym) 1550 } 1551 } 1552 1553 func (p *GCProg) emit(t *Type, offset int64) { 1554 dowidth(t) 1555 if !haspointers(t) { 1556 return 1557 } 1558 if t.Width == int64(Widthptr) { 1559 p.w.Ptr(offset / int64(Widthptr)) 1560 return 1561 } 1562 switch t.Etype { 1563 default: 1564 Fatal("GCProg.emit: unexpected type %v", t) 1565 1566 case TSTRING: 1567 p.w.Ptr(offset / int64(Widthptr)) 1568 1569 case TINTER: 1570 p.w.Ptr(offset / int64(Widthptr)) 1571 p.w.Ptr(offset/int64(Widthptr) + 1) 1572 1573 case TARRAY: 1574 if Isslice(t) { 1575 p.w.Ptr(offset / int64(Widthptr)) 1576 return 1577 } 1578 if t.Bound == 0 { 1579 // should have been handled by haspointers check above 1580 Fatal("GCProg.emit: empty array") 1581 } 1582 1583 // Flatten array-of-array-of-array to just a big array by multiplying counts. 1584 count := t.Bound 1585 elem := t.Type 1586 for Isfixedarray(elem) { 1587 count *= elem.Bound 1588 elem = elem.Type 1589 } 1590 1591 if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) { 1592 // Cheaper to just emit the bits. 1593 for i := int64(0); i < count; i++ { 1594 p.emit(elem, offset+i*elem.Width) 1595 } 1596 return 1597 } 1598 p.emit(elem, offset) 1599 p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr)) 1600 p.w.Repeat(elem.Width/int64(Widthptr), count-1) 1601 1602 case TSTRUCT: 1603 for t1 := t.Type; t1 != nil; t1 = t1.Down { 1604 p.emit(t1.Type, offset+t1.Width) 1605 } 1606 } 1607 }