// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/internal/gcprog"
	"cmd/internal/obj"
	"fmt"
	"os"
	"sort"
)

// runtime interface and reflection data structures
var signatlist *NodeList

// byMethodNameAndPackagePath sorts method signatures by name, then package path.
type byMethodNameAndPackagePath []*Sig

func (x byMethodNameAndPackagePath) Len() int      { return len(x) }
func (x byMethodNameAndPackagePath) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byMethodNameAndPackagePath) Less(i, j int) bool {
	return siglt(x[i], x[j])
}

// siglt reports whether a < b
func siglt(a, b *Sig) bool {
	if a.name != b.name {
		return a.name < b.name
	}
	if a.pkg == b.pkg {
		return false
	}
	if a.pkg == nil {
		return true
	}
	if b.pkg == nil {
		return false
	}
	return a.pkg.Path < b.pkg.Path
}

// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../runtime/hashmap.go!
const (
	BUCKETSIZE = 8
	MAXKEYSIZE = 128
	MAXVALSIZE = 128
)

func makefield(name string, t *Type) *Type {
	f := typ(TFIELD)
	f.Type = t
	f.Sym = new(Sym)
	f.Sym.Name = name
	return f
}

func mapbucket(t *Type) *Type {
	if t.Bucket != nil {
		return t.Bucket
	}

	bucket := typ(TSTRUCT)
	keytype := t.Down
	valtype := t.Type
	dowidth(keytype)
	dowidth(valtype)
	if keytype.Width > MAXKEYSIZE {
		keytype = Ptrto(keytype)
	}
	if valtype.Width > MAXVALSIZE {
		valtype = Ptrto(valtype)
	}

	// The first field is: uint8 topbits[BUCKETSIZE].
	arr := typ(TARRAY)

	arr.Type = Types[TUINT8]
	arr.Bound = BUCKETSIZE
	field := make([]*Type, 0, 5)
	field = append(field, makefield("topbits", arr))
	arr = typ(TARRAY)
	arr.Type = keytype
	arr.Bound = BUCKETSIZE
	field = append(field, makefield("keys", arr))
	arr = typ(TARRAY)
	arr.Type = valtype
	arr.Bound = BUCKETSIZE
	field = append(field, makefield("values", arr))

	// Make sure the overflow pointer is the last memory in the struct,
	// because the runtime assumes it can use size-ptrSize as the
	// offset of the overflow pointer. We double-check that property
	// below once the offsets and size are computed.
	//
	// BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
	// On 32-bit systems, the max alignment is 32-bit, and the
	// overflow pointer will add another 32-bit field, and the struct
	// will end with no padding.
	// On 64-bit systems, the max alignment is 64-bit, and the
	// overflow pointer will add another 64-bit field, and the struct
	// will end with no padding.
	// On nacl/amd64p32, however, the max alignment is 64-bit,
	// but the overflow pointer will add only a 32-bit field,
	// so if the struct needs 64-bit padding (because a key or value does)
	// then it would end with an extra 32-bit padding field.
	// Preempt that by emitting the padding here.
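	// (The condition below only triggers when a key or value needs
	// alignment greater than the pointer size, as on amd64p32 where
	// Widthptr is 4 but 64-bit fields are 8-byte aligned.)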
	if int(t.Type.Align) > Widthptr || int(t.Down.Align) > Widthptr {
		field = append(field, makefield("pad", Types[TUINTPTR]))
	}

	// If keys and values have no pointers, the map implementation
	// can keep a list of overflow pointers on the side so that
	// buckets can be marked as having no pointers.
	// Arrange for the bucket to have no pointers by changing
	// the type of the overflow field to uintptr in this case.
	// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
	otyp := Ptrto(bucket)
	if !haspointers(t.Type) && !haspointers(t.Down) && t.Type.Width <= MAXKEYSIZE && t.Down.Width <= MAXVALSIZE {
		otyp = Types[TUINTPTR]
	}
	ovf := makefield("overflow", otyp)
	field = append(field, ovf)

	// link up fields
	bucket.Noalg = true
	bucket.Local = t.Local
	bucket.Type = field[0]
	for n := int32(0); n < int32(len(field)-1); n++ {
		field[n].Down = field[n+1]
	}
	field[len(field)-1].Down = nil
	dowidth(bucket)

	// Double-check that overflow field is final memory in struct,
	// with no padding at end. See comment above.
	if ovf.Width != bucket.Width-int64(Widthptr) {
		Yyerror("bad math in mapbucket for %v", t)
	}

	t.Bucket = bucket

	bucket.Map = t
	return bucket
}

// Builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with ../../runtime/hashmap.go!
func hmap(t *Type) *Type {
	if t.Hmap != nil {
		return t.Hmap
	}

	bucket := mapbucket(t)
	var field [8]*Type
	field[0] = makefield("count", Types[TINT])
	field[1] = makefield("flags", Types[TUINT8])
	field[2] = makefield("B", Types[TUINT8])
	field[3] = makefield("hash0", Types[TUINT32])
	field[4] = makefield("buckets", Ptrto(bucket))
	field[5] = makefield("oldbuckets", Ptrto(bucket))
	field[6] = makefield("nevacuate", Types[TUINTPTR])
	field[7] = makefield("overflow", Types[TUNSAFEPTR])

	h := typ(TSTRUCT)
	h.Noalg = true
	h.Local = t.Local
	h.Type = field[0]
	for n := int32(0); n < int32(len(field)-1); n++ {
		field[n].Down = field[n+1]
	}
	field[len(field)-1].Down = nil
	dowidth(h)
	t.Hmap = h
	h.Map = t
	return h
}

func hiter(t *Type) *Type {
	if t.Hiter != nil {
		return t.Hiter
	}

	// build a struct:
	// hash_iter {
	//    key         *Key
	//    val         *Value
	//    t           *MapType
	//    h           *Hmap
	//    buckets     *Bucket
	//    bptr        *Bucket
	//    overflow0   unsafe.Pointer
	//    overflow1   unsafe.Pointer
	//    startBucket uintptr
	//    stuff       uintptr
	//    bucket      uintptr
	//    checkBucket uintptr
	// }
	// must match ../../runtime/hashmap.go:hash_iter.
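	// All 12 fields are pointer-sized, which is what the size check
	// after dowidth below relies on.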
	var field [12]*Type
	field[0] = makefield("key", Ptrto(t.Down))

	field[1] = makefield("val", Ptrto(t.Type))
	field[2] = makefield("t", Ptrto(Types[TUINT8]))
	field[3] = makefield("h", Ptrto(hmap(t)))
	field[4] = makefield("buckets", Ptrto(mapbucket(t)))
	field[5] = makefield("bptr", Ptrto(mapbucket(t)))
	field[6] = makefield("overflow0", Types[TUNSAFEPTR])
	field[7] = makefield("overflow1", Types[TUNSAFEPTR])
	field[8] = makefield("startBucket", Types[TUINTPTR])
	field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
	field[10] = makefield("bucket", Types[TUINTPTR])
	field[11] = makefield("checkBucket", Types[TUINTPTR])

	// build iterator struct holding the above fields
	i := typ(TSTRUCT)

	i.Noalg = true
	i.Type = field[0]
	for n := int32(0); n < int32(len(field)-1); n++ {
		field[n].Down = field[n+1]
	}
	field[len(field)-1].Down = nil
	dowidth(i)
	if i.Width != int64(12*Widthptr) {
		Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
	}
	t.Hiter = i
	i.Map = t
	return i
}

// f is method type, with receiver.
// return function type, receiver as first argument (or not).
func methodfunc(f *Type, receiver *Type) *Type {
	var in *NodeList
	if receiver != nil {
		d := Nod(ODCLFIELD, nil, nil)
		d.Type = receiver
		in = list(in, d)
	}

	var d *Node
	for t := getinargx(f).Type; t != nil; t = t.Down {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		d.Isddd = t.Isddd
		in = list(in, d)
	}

	var out *NodeList
	for t := getoutargx(f).Type; t != nil; t = t.Down {
		d = Nod(ODCLFIELD, nil, nil)
		d.Type = t.Type
		out = list(out, d)
	}

	t := functype(nil, in, out)
	if f.Nname != nil {
		// Link to name of original method function.
		t.Nname = f.Nname
	}

	return t
}

// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *Type) []*Sig {
	// method type
	mt := methtype(t, 0)

	if mt == nil {
		return nil
	}
	expandmeth(mt)

	// type stored in interface word
	it := t

	if !isdirectiface(it) {
		it = Ptrto(t)
	}

	// make list of methods for t,
	// generating code if necessary.
	var ms []*Sig
	for f := mt.Xmethod; f != nil; f = f.Down {
		if f.Etype != TFIELD {
			Fatalf("methods: not field %v", f)
		}
		if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
		}
		if getthisx(f.Type).Type == nil {
			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
		}
		if f.Nointerface {
			continue
		}

		method := f.Sym
		if method == nil {
			continue
		}

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		this := getthisx(f.Type).Type.Type

		if Isptr[this.Etype] && this.Type == t {
			continue
		}
		if Isptr[this.Etype] && !Isptr[t.Etype] && f.Embedded != 2 && !isifacemethod(f.Type) {
			continue
		}

		var sig Sig
		ms = append(ms, &sig)

		sig.name = method.Name
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatalf("methods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.isym = methodsym(method, it, 1)
		sig.tsym = methodsym(method, t, 0)
		sig.type_ = methodfunc(f.Type, t)
		sig.mtype = methodfunc(f.Type, nil)

		if sig.isym.Flags&SymSiggen == 0 {
			sig.isym.Flags |= SymSiggen
			if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
				compiling_wrappers = 1
				genwrapper(it, f, sig.isym, 1)
				compiling_wrappers = 0
			}
		}

		if sig.tsym.Flags&SymSiggen == 0 {
			sig.tsym.Flags |= SymSiggen
			if !Eqtype(this, t) {
				compiling_wrappers = 1
				genwrapper(t, f, sig.tsym, 0)
				compiling_wrappers = 0
			}
		}
	}

	sort.Sort(byMethodNameAndPackagePath(ms))
	return ms
}

// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *Type) []*Sig {
	var methods []*Sig
	for f := t.Type; f != nil; f = f.Down {
		if f.Etype != TFIELD {
			Fatalf("imethods: not field")
		}
		if f.Type.Etype != TFUNC || f.Sym == nil {
			continue
		}
		method := f.Sym
		var sig = Sig{
			name: method.Name,
		}
		if !exportname(method.Name) {
			if method.Pkg == nil {
				Fatalf("imethods: missing package")
			}
			sig.pkg = method.Pkg
		}

		sig.mtype = f.Type
		sig.offset = 0
		sig.type_ = methodfunc(f.Type, nil)

		if n := len(methods); n > 0 {
			last := methods[n-1]
			if !(siglt(last, &sig)) {
				Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name)
			}
		}
		methods = append(methods, &sig)

		// Compiler can only refer to wrappers for non-blank methods.
		if isblanksym(method) {
			continue
		}

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym := methodsym(method, t, 0)

		if isym.Flags&SymSiggen == 0 {
			isym.Flags |= SymSiggen
			genwrapper(t, f, isym, 0)
		}
	}

	return methods
}

var dimportpath_gopkg *Pkg

func dimportpath(p *Pkg) {
	if p.Pathsym != nil {
		return
	}

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {
		return
	}

	if dimportpath_gopkg == nil {
		dimportpath_gopkg = mkpkg("go")
		dimportpath_gopkg.Name = "go"
	}

	nam := "importpath." + p.Prefix + "."

	n := Nod(ONAME, nil, nil)
	n.Sym = Pkglookup(nam, dimportpath_gopkg)

	n.Class = PEXTERN
	n.Xoffset = 0
	p.Pathsym = n.Sym

	if p == localpkg {
		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
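		// localpkg.Path may be empty for the package being compiled,
		// so write myimportpath (the -p argument) instead.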
		gdatastring(n, myimportpath)
	} else {
		gdatastring(n, p.Path)
	}
	ggloblsym(n.Sym, int32(Types[TSTRING].Width), obj.DUPOK|obj.RODATA)
}

func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
	if pkg == nil {
		return dgostringptr(s, ot, "")
	}

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full path of the package being compiled (i.e. -p
		// was not passed on the compiler command line), emit reference to
		// go.importpath.""., which 6l will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		var ns *Sym

		if ns == nil {
			ns = Pkglookup("importpath.\"\".", mkpkg("go"))
		}
		return dsymptr(s, ot, ns, 0)
	}

	dimportpath(pkg)
	return dsymptr(s, ot, pkg.Pathsym, 0)
}

// uncommonType
// ../../runtime/type.go:/uncommonType
func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
	m := methods(t)
	if t.Sym == nil && len(m) == 0 {
		return off
	}

	// fill in *extraType pointer in header
	off = int(Rnd(int64(off), int64(Widthptr)))

	dsymptr(sym, ptroff, sym, off)

	for _, a := range m {
		dtypesym(a.type_)
	}

	ot := off
	s := sym
	if t.Sym != nil {
		ot = dgostringptr(s, ot, t.Sym.Name)
		if t != Types[t.Etype] && t != errortype {
			ot = dgopkgpath(s, ot, t.Sym.Pkg)
		} else {
			ot = dgostringptr(s, ot, "")
		}
	} else {
		ot = dgostringptr(s, ot, "")
		ot = dgostringptr(s, ot, "")
	}

	// slice header
	ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)

	n := len(m)
	ot = duintxx(s, ot, uint64(n), Widthint)
	ot = duintxx(s, ot, uint64(n), Widthint)

	// methods
	for _, a := range m {
		// method
		// ../../runtime/type.go:/method
		ot = dgostringptr(s, ot, a.name)

		ot = dgopkgpath(s, ot, a.pkg)
		ot = dsymptr(s, ot, dtypesym(a.mtype), 0)
		ot = dsymptr(s, ot, dtypesym(a.type_), 0)
		if a.isym != nil {
			ot = dsymptr(s, ot, a.isym, 0)
		} else {
			ot = duintptr(s, ot, 0)
		}
		if a.tsym != nil {
			ot = dsymptr(s, ot, a.tsym, 0)
		} else {
			ot = duintptr(s, ot, 0)
		}
	}

	return ot
}

var kinds = []int{
	TINT:        obj.KindInt,
	TUINT:       obj.KindUint,
	TINT8:       obj.KindInt8,
	TUINT8:      obj.KindUint8,
	TINT16:      obj.KindInt16,
	TUINT16:     obj.KindUint16,
	TINT32:      obj.KindInt32,
	TUINT32:     obj.KindUint32,
	TINT64:      obj.KindInt64,
	TUINT64:     obj.KindUint64,
	TUINTPTR:    obj.KindUintptr,
	TFLOAT32:    obj.KindFloat32,
	TFLOAT64:    obj.KindFloat64,
	TBOOL:       obj.KindBool,
	TSTRING:     obj.KindString,
	TPTR32:      obj.KindPtr,
	TPTR64:      obj.KindPtr,
	TSTRUCT:     obj.KindStruct,
	TINTER:      obj.KindInterface,
	TCHAN:       obj.KindChan,
	TMAP:        obj.KindMap,
	TARRAY:      obj.KindArray,
	TFUNC:       obj.KindFunc,
	TCOMPLEX64:  obj.KindComplex64,
	TCOMPLEX128: obj.KindComplex128,
	TUNSAFEPTR:  obj.KindUnsafePointer,
}

func haspointers(t *Type) bool {
	if t.Haspointers != 0 {
		return t.Haspointers-1 != 0
	}

	var ret bool
	switch t.Etype {
	case TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TBOOL:
		ret = false

	case TARRAY:
		if t.Bound < 0 { // slice
			ret = true
			break
		}

		if t.Bound == 0 { // empty array
			ret = false
			break
		}

		ret = haspointers(t.Type)

	case TSTRUCT:
		ret = false
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if haspointers(t1.Type) {
				ret = true
				break
			}
		}

	case TSTRING,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TINTER,
		TCHAN,
		TMAP,
		TFUNC:
		fallthrough
	default:
		ret = true

	case TFIELD:
		Fatalf("haspointers: unexpected type, %v", t)
	}

	t.Haspointers = 1 + uint8(obj.Bool2int(ret))
	return ret
}

// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *Type) int64 {
	if !haspointers(t) {
		return 0
	}

	switch t.Etype {
	case TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TFUNC,
		TCHAN,
		TMAP:
		return int64(Widthptr)

	case TSTRING:
		// struct { byte *str; intgo len; }
		return int64(Widthptr)

	case TINTER:
		// struct { Itab *tab; void *data; } or
		// struct { Type *type; void *data; }
		return 2 * int64(Widthptr)

	case TARRAY:
		if Isslice(t) {
			// struct { byte *array; uintgo len; uintgo cap; }
			return int64(Widthptr)
		}
		// haspointers already eliminated t.Bound == 0.
		return (t.Bound-1)*t.Type.Width + typeptrdata(t.Type)

	case TSTRUCT:
		// Find the last field that has pointers.
		var lastPtrField *Type
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if haspointers(t1.Type) {
				lastPtrField = t1
			}
		}
		return lastPtrField.Width + typeptrdata(lastPtrField.Type)

	default:
		Fatalf("typeptrdata: unexpected type, %v", t)
		return 0
	}
}

// commonType
// ../../runtime/type.go:/commonType

var dcommontype_algarray *Sym

func dcommontype(s *Sym, ot int, t *Type) int {
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if dcommontype_algarray == nil {
		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *Sym
	if alg < 0 || alg == AMEM {
		algsym = dalgsym(t)
	}

	var sptr *Sym
	tptr := Ptrto(t)
	if !Isptr[t.Etype] && (t.Sym != nil || methods(tptr) != nil) {
		sptr = dtypesym(tptr)
	} else {
		sptr = weaktypesym(tptr)
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../pkg/reflect/type.go:/^type.commonType
	// actual type structure
	// type commonType struct {
	//	size       uintptr
	//	ptrsize    uintptr
	//	hash       uint32
	//	_          uint8
	//	align      uint8
	//	fieldAlign uint8
	//	kind       uint8
	//	alg        unsafe.Pointer
	//	gcdata     unsafe.Pointer
	//	string     *string
	//	*extraType
	//	ptrToThis  *Type
	// }
	ot = duintptr(s, ot, uint64(t.Width))
	ot = duintptr(s, ot, uint64(ptrdata))

	ot = duint32(s, ot, typehash(t))
	ot = duint8(s, ot, 0) // unused

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)

	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(s, ot, t.Align) // align
	ot = duint8(s, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if t.Etype == TARRAY && t.Bound < 0 {
		i = obj.KindSlice
	}
	if !haspointers(t) {
		i |= obj.KindNoPointers
	}
	if isdirectiface(t) {
		i |= obj.KindDirectIface
	}
	if useGCProg {
		i |= obj.KindGCProg
	}
	ot = duint8(s, ot, uint8(i)) // kind
	if algsym == nil {
		ot = dsymptr(s, ot, dcommontype_algarray, alg*sizeofAlg)
	} else {
		ot = dsymptr(s, ot, algsym, 0)
	}
	ot = dsymptr(s, ot, gcsym, 0)

	p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned)

	//print("dcommontype: %s\n", p);
	ot = dgostringptr(s, ot, p) // string

	// skip pointer to extraType,
	// which follows the rest of this type structure.
	// caller will fill in if needed.
	// otherwise linker will assume 0.
	ot += Widthptr

	ot = dsymptr(s, ot, sptr, 0) // ptrto type
	return ot
}

func typesym(t *Type) *Sym {
	return Pkglookup(Tconv(t, obj.FmtLeft), typepkg)
}

func tracksym(t *Type) *Sym {
	return Pkglookup(Tconv(t.Outer, obj.FmtLeft)+"."+t.Sym.Name, trackpkg)
}

func typelinksym(t *Type) *Sym {
	// %-uT is what the generated Type's string field says.
	// It uses (ambiguous) package names instead of import paths.
	// %-T is the complete, unambiguous type name.
	// We want the types to end up sorted by string field,
	// so use that first in the name, and then add :%-T to
	// disambiguate. We use a tab character as the separator to
	// ensure the types appear sorted by their string field. The
	// names are a little long but they are discarded by the linker
	// and do not end up in the symbol table of the final binary.
	p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned) + "\t" + Tconv(t, obj.FmtLeft)

	s := Pkglookup(p, typelinkpkg)

	//print("typelinksym: %s -> %+S\n", p, s);

	return s
}

func typesymprefix(prefix string, t *Type) *Sym {
	p := prefix + "." + Tconv(t, obj.FmtLeft)
	s := Pkglookup(p, typepkg)

	//print("algsym: %s -> %+S\n", p, s);

	return s
}

func typenamesym(t *Type) *Sym {
	if t == nil || (Isptr[t.Etype] && t.Type == nil) || isideal(t) {
		Fatalf("typename %v", t)
	}
	s := typesym(t)
	if s.Def == nil {
		n := Nod(ONAME, nil, nil)
		n.Sym = s
		n.Type = Types[TUINT8]
		n.Addable = true
		n.Ullman = 1
		n.Class = PEXTERN
		n.Xoffset = 0
		n.Typecheck = 1
		s.Def = n

		signatlist = list(signatlist, typenod(t))
	}

	return s.Def.Sym
}

func typename(t *Type) *Node {
	s := typenamesym(t)
	n := Nod(OADDR, s.Def, nil)
	n.Type = Ptrto(s.Def.Type)
	n.Addable = true
	n.Ullman = 2
	n.Typecheck = 1
	return n
}

func weaktypesym(t *Type) *Sym {
	p := Tconv(t, obj.FmtLeft)
	s := Pkglookup(p, weaktypepkg)

	//print("weaktypesym: %s -> %+S\n", p, s);

	return s
}

// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
func isreflexive(t *Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TSTRING,
		TCHAN:
		return true

	case TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER:
		return false

	case TARRAY:
		if Isslice(t) {
			Fatalf("slice can't be a map key: %v", t)
		}
		return isreflexive(t.Type)

	case TSTRUCT:
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if !isreflexive(t1.Type) {
				return false
			}
		}
		return true

	default:
		Fatalf("bad type for map key: %v", t)
		return false
	}
}

// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *Type) bool {
	switch t.Etype {
	case TBOOL,
		TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TCHAN:
		return false

	case TFLOAT32, // floats can be +0/-0
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TINTER,
		TSTRING: // strings might have smaller backing stores
		return true

	case TARRAY:
		if Isslice(t) {
			Fatalf("slice can't be a map key: %v", t)
		}
		return needkeyupdate(t.Type)

	case TSTRUCT:
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if needkeyupdate(t1.Type) {
				return true
			}
		}
		return false

	default:
		Fatalf("bad type for map key: %v", t)
		return true
	}
}

func dtypesym(t *Type) *Sym {
	// Replace byte, rune aliases with real type.
	// They've been separate internally to make error messages
	// better, but we have to merge them in the reflect tables.
	if t == bytetype || t == runetype {
		t = Types[t.Etype]
	}

	if isideal(t) {
		Fatalf("dtypesym %v", t)
	}

	s := typesym(t)
	if s.Flags&SymSiggen != 0 {
		return s
	}
	s.Flags |= SymSiggen

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
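	// tbase is the base type used for the checks below; for an unnamed
	// pointer to a named type, that is the named element type.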
	tbase := t

	if Isptr[t.Etype] && t.Sym == nil && t.Type.Sym != nil {
		tbase = t.Type
	}
	dupok := 0
	if tbase.Sym == nil {
		dupok = obj.DUPOK
	}

	if myimportpath == "runtime" && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local {
		return s
	}
	if isforw[tbase.Etype] {
		return s
	}

ok:
	ot := 0
	xt := 0
	switch t.Etype {
	default:
		ot = dcommontype(s, ot, t)
		xt = ot - 2*Widthptr

	case TARRAY:
		if t.Bound >= 0 {
			// ../../runtime/type.go:/ArrayType
			s1 := dtypesym(t.Type)

			t2 := typ(TARRAY)
			t2.Type = t.Type
			t2.Bound = -1 // slice
			s2 := dtypesym(t2)
			ot = dcommontype(s, ot, t)
			xt = ot - 2*Widthptr
			ot = dsymptr(s, ot, s1, 0)
			ot = dsymptr(s, ot, s2, 0)
			ot = duintptr(s, ot, uint64(t.Bound))
		} else {
			// ../../runtime/type.go:/SliceType
			s1 := dtypesym(t.Type)

			ot = dcommontype(s, ot, t)
			xt = ot - 2*Widthptr
			ot = dsymptr(s, ot, s1, 0)
		}

	// ../../runtime/type.go:/ChanType
	case TCHAN:
		s1 := dtypesym(t.Type)

		ot = dcommontype(s, ot, t)
		xt = ot - 2*Widthptr
		ot = dsymptr(s, ot, s1, 0)
		ot = duintptr(s, ot, uint64(t.Chan))

	case TFUNC:
		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
		}
		isddd := false
		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
			isddd = t1.Isddd
			dtypesym(t1.Type)
		}

		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
		}

		ot = dcommontype(s, ot, t)
		xt = ot - 2*Widthptr
		ot = duint8(s, ot, uint8(obj.Bool2int(isddd)))

		// two slice headers: in and out.
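		// Each header is a pointer plus len and cap ints; the in data
		// follows both headers and the out data follows the in data,
		// which is what the dsymptr offsets below encode.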
		ot = int(Rnd(int64(ot), int64(Widthptr)))

		ot = dsymptr(s, ot, s, ot+2*(Widthptr+2*Widthint))
		n := t.Thistuple + t.Intuple
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = dsymptr(s, ot, s, ot+1*(Widthptr+2*Widthint)+n*Widthptr)
		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)

		// slice data
		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}
		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}
		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}

	case TINTER:
		m := imethods(t)
		n := len(m)
		for _, a := range m {
			dtypesym(a.type_)
		}

		// ../../../runtime/type.go:/InterfaceType
		ot = dcommontype(s, ot, t)

		xt = ot - 2*Widthptr
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		for _, a := range m {
			// ../../../runtime/type.go:/imethod
			ot = dgostringptr(s, ot, a.name)

			ot = dgopkgpath(s, ot, a.pkg)
			ot = dsymptr(s, ot, dtypesym(a.type_), 0)
		}

	// ../../../runtime/type.go:/MapType
	case TMAP:
		s1 := dtypesym(t.Down)

		s2 := dtypesym(t.Type)
		s3 := dtypesym(mapbucket(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(s, ot, t)
		xt = ot - 2*Widthptr
		ot = dsymptr(s, ot, s1, 0)
		ot = dsymptr(s, ot, s2, 0)
		ot = dsymptr(s, ot, s3, 0)
		ot = dsymptr(s, ot, s4, 0)
		if t.Down.Width > MAXKEYSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Down.Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		if t.Type.Width > MAXVALSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Type.Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		ot = duint16(s, ot, uint16(mapbucket(t).Width))
		ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down))))
		ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Down))))

	case TPTR32, TPTR64:
		if t.Type.Etype == TANY {
			// ../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(s, ot, t)

			break
		}

		// ../../runtime/type.go:/PtrType
		s1 := dtypesym(t.Type)

		ot = dcommontype(s, ot, t)
		xt = ot - 2*Widthptr
		ot = dsymptr(s, ot, s1, 0)

	// ../../runtime/type.go:/StructType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(s, ot, t)
		xt = ot - 2*Widthptr
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			// ../../runtime/type.go:/structField
			if t1.Sym != nil && t1.Embedded == 0 {
				ot = dgostringptr(s, ot, t1.Sym.Name)
				if exportname(t1.Sym.Name) {
					ot = dgostringptr(s, ot, "")
				} else {
					ot = dgopkgpath(s, ot, t1.Sym.Pkg)
				}
			} else {
				ot = dgostringptr(s, ot, "")
				if t1.Type.Sym != nil &&
					(t1.Type.Sym.Pkg == builtinpkg || !exportname(t1.Type.Sym.Name)) {
					ot = dgopkgpath(s, ot, localpkg)
				} else {
					ot = dgostringptr(s, ot, "")
				}
			}

			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			ot = dgostrlitptr(s, ot, t1.Note)
			ot = duintptr(s, ot, uint64(t1.Width)) // field offset
		}
	}

	ot = dextratype(s, ot, t, xt)
	ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))

	// generate typelink.foo pointing at s = type.foo.
	// The linker will leave a table of all the typelinks for
	// types in the binary, so reflect can find them.
	// We only need the link for unnamed composites that
	// we want to be able to find.
	if t.Sym == nil {
		switch t.Etype {
		case TPTR32, TPTR64:
			// The ptrto field of the type data cannot be relied on when
			// dynamic linking: a type T may be defined in a module that makes
			// no use of pointers to that type, but another module can contain
			// a package that imports the first one and does use *T pointers.
			// The second module will end up defining type data for *T and a
			// type.*T symbol pointing at it. It's important that calling
			// .PtrTo() on the reflect.Type for T returns this type data and
			// not some synthesized object, so we need reflect to be able to
			// find it!
			if !Ctxt.Flag_dynlink {
				break
			}
			fallthrough
		case TARRAY, TCHAN, TFUNC, TMAP:
			slink := typelinksym(t)
			dsymptr(slink, 0, s, 0)
			ggloblsym(slink, int32(Widthptr), int16(dupok|obj.RODATA))
		}
	}

	return s
}

func dumptypestructs() {
	var n *Node

	// copy types from externdcl list to signatlist
	for _, n := range externdcl {
		if n.Op != OTYPE {
			continue
		}
		signatlist = list(signatlist, n)
	}

	// process signatlist
	var t *Type
	for l := signatlist; l != nil; l = l.Next {
		n = l.N
		if n.Op != OTYPE {
			continue
		}
		t = n.Type
		dtypesym(t)
		if t.Sym != nil {
			dtypesym(Ptrto(t))
		}
	}

	// generate import strings for imported packages
	for _, p := range pkgs {
		if p.Direct {
			dimportpath(p)
		}
	}

	// do basic types if compiling package runtime.
	// they have to be in at least one package,
	// and runtime is always loaded implicitly,
	// so this is as good as any.
	// another possible choice would be package main,
	// but using runtime means fewer copies in .6 files.
	if myimportpath == "runtime" {
		for i := EType(1); i <= TBOOL; i++ {
			dtypesym(Ptrto(Types[i]))
		}
		dtypesym(Ptrto(Types[TSTRING]))
		dtypesym(Ptrto(Types[TUNSAFEPTR]))

		// emit type structs for error and func(error) string.
		// The latter is the type of an auto-generated wrapper.
		dtypesym(Ptrto(errortype))

		dtypesym(functype(nil, list1(Nod(ODCLFIELD, nil, typenod(errortype))), list1(Nod(ODCLFIELD, nil, typenod(Types[TSTRING])))))

		// add paths for runtime and main, which 6l imports implicitly.
		dimportpath(Runtimepkg)

		if flag_race != 0 {
			dimportpath(racepkg)
		}
		if flag_msan != 0 {
			dimportpath(msanpkg)
		}
		dimportpath(mkpkg("main"))
	}
}

func dalgsym(t *Type) *Sym {
	var s *Sym
	var hashfunc *Sym
	var eqfunc *Sym

	// dalgsym is only called for a type that needs an algorithm table,
	// which implies that the type is comparable (or else it would use ANOEQ).

	if algtype(t) == AMEM {
		// we use one algorithm table for all AMEM types of a given size
		p := fmt.Sprintf(".alg%d", t.Width)

		s = Pkglookup(p, typepkg)

		if s.Flags&SymAlgGen != 0 {
			return s
		}
		s.Flags |= SymAlgGen

		// make hash closure
		p = fmt.Sprintf(".hashfunc%d", t.Width)

		hashfunc = Pkglookup(p, typepkg)

		ot := 0
		ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0)
		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)

		// make equality closure
		p = fmt.Sprintf(".eqfunc%d", t.Width)

		eqfunc = Pkglookup(p, typepkg)

		ot = 0
		ot = dsymptr(eqfunc, ot, Pkglookup("memequal_varlen", Runtimepkg), 0)
		ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
	} else {
		// generate an alg table specific to this type
		s = typesymprefix(".alg", t)

		hash := typesymprefix(".hash", t)
		eq := typesymprefix(".eq", t)
		hashfunc = typesymprefix(".hashfunc", t)
		eqfunc = typesymprefix(".eqfunc", t)

		genhash(hash, t)
		geneq(eq, t)

		// make Go funcs (closures) for calling hash and equal from Go
		dsymptr(hashfunc, 0, hash, 0)

		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
		dsymptr(eqfunc, 0, eq, 0)
		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// ../../runtime/alg.go:/typeAlg
	ot := 0

	ot = dsymptr(s, ot, hashfunc, 0)
	ot = dsymptr(s, ot, eqfunc, 0)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
	return s
}

// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// 16 bytes is enough to describe 128 pointer-sized words, 512 or 1024 bytes
// depending on the system. Above this length, the GC information is
// recorded as a GC program, which can express repetition compactly.
// In either form, the information is used by the runtime to initialize the
// heap bitmap, and for large types (like 128 or more words), they are
// roughly the same speed. GC programs are never much larger and often
// more compact. (If large arrays are involved, they can be arbitrarily more
// compact.)
//
// The cutoff must be large enough that any allocation large enough to
// use a GC program is large enough that it does not share heap bitmap
// bytes with any other objects, allowing the GC program execution to
// assume an aligned start and not use atomic operations. In the current
// runtime, this means all malloc size classes larger than the cutoff must
// be multiples of four words. On 32-bit systems that's 16 bytes, and
// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
// must be >= 4.
//
// We used to use 16 because the GC programs do have some constant overhead
// to get started, and processing 128 pointers seems to be enough to
// amortize that overhead well.
//
// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
// use bitmaps for objects up to 64 kB in size.
//
// Also known to reflect/type.go.
//
const maxPtrmaskBytes = 2048

// dgcsym emits and returns a data symbol containing GC information for type t,
// along with a boolean reporting whether the UseGCProg bit should be set in
// the type kind, and the ptrdata field to record in the reflect type information.
func dgcsym(t *Type) (sym *Sym, useGCProg bool, ptrdata int64) {
	ptrdata = typeptrdata(t)
	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
		sym = dgcptrmask(t)
		return
	}

	useGCProg = true
	sym, ptrdata = dgcprog(t)
	return
}

// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
func dgcptrmask(t *Type) *Sym {
	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
	fillptrmask(t, ptrmask)
	p := fmt.Sprintf("gcbits.%x", ptrmask)

	sym := Pkglookup(p, Runtimepkg)
	if sym.Flags&SymUniq == 0 {
		sym.Flags |= SymUniq
		for i, x := range ptrmask {
			duint8(sym, i, x)
		}
		ggloblsym(sym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
	}
	return sym
}

// fillptrmask fills in ptrmask with 1s corresponding to the
// word offsets in t that hold pointers.
// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
func fillptrmask(t *Type, ptrmask []byte) {
	for i := range ptrmask {
		ptrmask[i] = 0
	}
	if !haspointers(t) {
		return
	}

	vec := bvalloc(8 * int32(len(ptrmask)))
	xoffset := int64(0)
	onebitwalktype1(t, &xoffset, vec)

	nptr := typeptrdata(t) / int64(Widthptr)
	for i := int64(0); i < nptr; i++ {
		if bvget(vec, int32(i)) == 1 {
			ptrmask[i/8] |= 1 << (uint(i) % 8)
		}
	}
}

// dgcprog emits and returns the symbol containing a GC program for type t
// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
// In practice, the size is typeptrdata(t) except for non-trivial arrays.
// For non-trivial arrays, the program describes the full t.Width size.
func dgcprog(t *Type) (*Sym, int64) {
	dowidth(t)
	if t.Width == BADWIDTH {
		Fatalf("dgcprog: %v badwidth", t)
	}
	sym := typesymprefix(".gcprog", t)
	var p GCProg
	p.init(sym)
	p.emit(t, 0)
	offset := p.w.BitIndex() * int64(Widthptr)
	p.end()
	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
	}
	return sym, offset
}

type GCProg struct {
	sym    *Sym
	symoff int
	w      gcprog.Writer
}

var Debug_gcprog int // set by -d gcprog

func (p *GCProg) init(sym *Sym) {
	p.sym = sym
	p.symoff = 4 // first 4 bytes hold program length
	p.w.Init(p.writeByte)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", sym)
		p.w.Debug(os.Stderr)
	}
}

func (p *GCProg) writeByte(x byte) {
	p.symoff = duint8(p.sym, p.symoff, x)
}

func (p *GCProg) end() {
	p.w.End()
	duint32(p.sym, 0, uint32(p.symoff-4))
	ggloblsym(p.sym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
	if Debug_gcprog > 0 {
		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.sym)
	}
}

func (p *GCProg) emit(t *Type, offset int64) {
	dowidth(t)
	if !haspointers(t) {
		return
	}
	if t.Width == int64(Widthptr) {
		p.w.Ptr(offset / int64(Widthptr))
		return
	}
	switch t.Etype {
	default:
		Fatalf("GCProg.emit: unexpected type %v", t)

	case TSTRING:
		p.w.Ptr(offset / int64(Widthptr))

	case TINTER:
		p.w.Ptr(offset / int64(Widthptr))
		p.w.Ptr(offset/int64(Widthptr) + 1)

	case TARRAY:
		if Isslice(t) {
			p.w.Ptr(offset / int64(Widthptr))
			return
		}
		if t.Bound == 0 {
			// should have been handled by haspointers check above
			Fatalf("GCProg.emit: empty array")
		}

		// Flatten array-of-array-of-array to just a big array by multiplying counts.
		count := t.Bound
		elem := t.Type
		for Isfixedarray(elem) {
			count *= elem.Bound
			elem = elem.Type
		}

		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
			// Cheaper to just emit the bits.
			for i := int64(0); i < count; i++ {
				p.emit(elem, offset+i*elem.Width)
			}
			return
		}
		p.emit(elem, offset)
		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
		p.w.Repeat(elem.Width/int64(Widthptr), count-1)

	case TSTRUCT:
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			p.emit(t1.Type, offset+t1.Width)
		}
	}
}