github.com/sbinet/go@v0.0.0-20160827155028-54d7de7dd62b/src/cmd/compile/internal/gc/gsubr.go

// Derived from Inferno utils/6c/txt.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package gc

import (
	"cmd/internal/obj"
	"cmd/internal/sys"
	"fmt"
	"runtime"
	"strings"
)

var (
	ddumped bool
	dfirst  *obj.Prog
	dpc     *obj.Prog
)

// Is this node a memory operand?
func Ismem(n *Node) bool {
	switch n.Op {
	case OITAB,
		OIDATA,
		OSPTR,
		OLEN,
		OCAP,
		OINDREG,
		ONAME,
		OCLOSUREVAR:
		return true

	case OADDR:
		// amd64 and s390x use PC relative addressing.
		// TODO(rsc): not sure why ppc64 needs this too.
		return Thearch.LinkArch.InFamily(sys.AMD64, sys.PPC64, sys.S390X)
	}

	return false
}

func Samereg(a *Node, b *Node) bool {
	if a == nil || b == nil {
		return false
	}
	if a.Op != OREGISTER {
		return false
	}
	if b.Op != OREGISTER {
		return false
	}
	if a.Reg != b.Reg {
		return false
	}
	return true
}

func Gbranch(as obj.As, t *Type, likely int) *obj.Prog {
	p := Prog(as)
	p.To.Type = obj.TYPE_BRANCH
	p.To.Val = nil
	if as != obj.AJMP && likely != 0 && !Thearch.LinkArch.InFamily(sys.PPC64, sys.ARM64, sys.MIPS64, sys.S390X) {
		p.From.Type = obj.TYPE_CONST
		if likely > 0 {
			p.From.Offset = 1
		}
	}

	if Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	return p
}

func Prog(as obj.As) *obj.Prog {
	var p *obj.Prog

	if as == obj.AGLOBL {
		if ddumped {
			Fatalf("already dumped data")
		}
		if dpc == nil {
			dpc = Ctxt.NewProg()
			dfirst = dpc
		}

		p = dpc
		dpc = Ctxt.NewProg()
		p.Link = dpc
	} else {
		p = Pc
		Pc = Ctxt.NewProg()
		Clearp(Pc)
		p.Link = Pc
	}

	if lineno == 0 && Debug['K'] != 0 {
		Warn("prog: line 0")
	}

	p.As = as
	p.Lineno = lineno
	return p
}

func Nodreg(n *Node, t *Type, r int) {
	if t == nil {
		Fatalf("nodreg: t nil")
	}

	*n = Node{}
	n.Op = OREGISTER
	n.Addable = true
	ullmancalc(n)
	n.Reg = int16(r)
	n.Type = t
}

func Nodindreg(n *Node, t *Type, r int) {
	Nodreg(n, t, r)
	n.Op = OINDREG
}

func Afunclit(a *obj.Addr, n *Node) {
	if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
		a.Type = obj.TYPE_MEM
		a.Sym = Linksym(n.Sym)
	}
}

func Clearp(p *obj.Prog) {
	obj.Nopout(p)
	p.As = obj.AEND
	p.Pc = int64(pcloc)
	pcloc++
}

func dumpdata() {
	ddumped = true
	if dfirst == nil {
		return
	}
	newplist()
	*Pc = *dfirst
	Pc = dpc
	Clearp(Pc)
}

func flushdata() {
	if dfirst == nil {
		return
	}
	newplist()
	*Pc = *dfirst
	Pc = dpc
	Clearp(Pc)
	dfirst = nil
	dpc = nil
}

// Fixup instructions after allocauto (formerly compactframe) has moved all autos around.
func fixautoused(p *obj.Prog) {
	for lp := &p; ; {
		p = *lp
		if p == nil {
			break
		}
		if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !((p.From.Node).(*Node)).Used {
			*lp = p.Link
			continue
		}

		if (p.As == obj.AVARDEF || p.As == obj.AVARKILL || p.As == obj.AVARLIVE) && p.To.Node != nil && !((p.To.Node).(*Node)).Used {
			// Cannot remove VARDEF instruction, because - unlike TYPE handled above -
			// VARDEFs are interspersed with other code, and a jump might be using the
			// VARDEF as a target. Replace with a no-op instead. A later pass will remove
			// the no-ops.
			obj.Nopout(p)

			continue
		}

		if p.From.Name == obj.NAME_AUTO && p.From.Node != nil {
			p.From.Offset += stkdelta[p.From.Node.(*Node)]
		}

		if p.To.Name == obj.NAME_AUTO && p.To.Node != nil {
			p.To.Offset += stkdelta[p.To.Node.(*Node)]
		}

		lp = &p.Link
	}
}

func ggloblnod(nam *Node) {
	p := Thearch.Gins(obj.AGLOBL, nam, nil)
	p.Lineno = nam.Lineno
	p.From.Sym.Gotype = Linksym(ngotype(nam))
	p.To.Sym = nil
	p.To.Type = obj.TYPE_CONST
	p.To.Offset = nam.Type.Width
	p.From3 = new(obj.Addr)
	if nam.Name.Readonly {
		p.From3.Offset = obj.RODATA
	}
	if nam.Type != nil && !haspointers(nam.Type) {
		p.From3.Offset |= obj.NOPTR
	}
}

func ggloblsym(s *Sym, width int32, flags int16) {
	ggloblLSym(Linksym(s), width, flags)
}

func ggloblLSym(s *obj.LSym, width int32, flags int16) {
	p := Thearch.Gins(obj.AGLOBL, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = s
	if flags&obj.LOCAL != 0 {
		p.From.Sym.Local = true
		flags &^= obj.LOCAL
	}
	p.To.Type = obj.TYPE_CONST
	p.To.Offset = int64(width)
	p.From3 = new(obj.Addr)
	p.From3.Offset = int64(flags)
}

func gjmp(to *obj.Prog) *obj.Prog {
	p := Gbranch(obj.AJMP, nil, 0)
	if to != nil {
		Patch(p, to)
	}
	return p
}

func gtrack(s *Sym) {
	p := Thearch.Gins(obj.AUSEFIELD, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = Linksym(s)
}

func gused(n *Node) {
	Thearch.Gins(obj.ANOP, n, nil) // used
}

func Isfat(t *Type) bool {
	if t != nil {
		switch t.Etype {
		case TSTRUCT, TARRAY, TSLICE, TSTRING,
			TINTER: // maybe remove later
			return true
		}
	}

	return false
}

// Sweep the prog list to mark any used nodes.
func markautoused(p *obj.Prog) {
	for ; p != nil; p = p.Link {
		if p.As == obj.ATYPE || p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}

		if p.From.Node != nil {
			((p.From.Node).(*Node)).Used = true
		}

		if p.To.Node != nil {
			((p.To.Node).(*Node)).Used = true
		}
	}
}

// Naddr rewrites a to refer to n.
// It assumes that a is zeroed on entry.
func Naddr(a *obj.Addr, n *Node) {
	if n == nil {
		return
	}

	if n.Type != nil && n.Type.Etype != TIDEAL {
		// TODO(rsc): This is undone by the selective clearing of width below,
		// to match architectures that were not as aggressive in setting width
		// during naddr. Those widths must be cleared to avoid triggering
		// failures in gins when it detects real but heretofore latent (and one
		// hopes innocuous) type mismatches.
		// The type mismatches should be fixed and the clearing below removed.
		dowidth(n.Type)

		a.Width = n.Type.Width
	}

	switch n.Op {
	default:
		a := a // copy to let escape into Ctxt.Dconv
		Debug['h'] = 1
		Dump("naddr", n)
		Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a))

	case OREGISTER:
		a.Type = obj.TYPE_REG
		a.Reg = n.Reg
		a.Sym = nil
		if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OINDREG:
		a.Type = obj.TYPE_MEM
		a.Reg = n.Reg
		a.Sym = Linksym(n.Sym)
		a.Offset = n.Xoffset
		if a.Offset != int64(int32(a.Offset)) {
			Yyerror("offset %d too large for OINDREG", a.Offset)
		}
		if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OCLOSUREVAR:
		if !Curfn.Func.Needctxt {
			Fatalf("closurevar without needctxt")
		}
		a.Type = obj.TYPE_MEM
		a.Reg = int16(Thearch.REGCTXT)
		a.Sym = nil
		a.Offset = n.Xoffset

	case OCFUNC:
		Naddr(a, n.Left)
		a.Sym = Linksym(n.Left.Sym)

	case ONAME:
		a.Etype = 0
		if n.Type != nil {
			a.Etype = uint8(Simtype[n.Type.Etype])
		}
		a.Offset = n.Xoffset
		s := n.Sym
		a.Node = n.Orig

		//if(a->node >= (Node*)&n)
		//	fatal("stack node");
		if s == nil {
			s = Lookup(".noname")
		}
		if n.Name.Method && n.Type != nil && n.Type.Sym != nil && n.Type.Sym.Pkg != nil {
			s = Pkglookup(s.Name, n.Type.Sym.Pkg)
		}

		a.Type = obj.TYPE_MEM
		switch n.Class {
		default:
			Fatalf("naddr: ONAME class %v %d\n", n.Sym, n.Class)

		case PEXTERN:
			a.Name = obj.NAME_EXTERN

		case PAUTO:
			a.Name = obj.NAME_AUTO

		case PPARAM, PPARAMOUT:
			a.Name = obj.NAME_PARAM

		case PFUNC:
			a.Name = obj.NAME_EXTERN
			a.Type = obj.TYPE_ADDR
			a.Width = int64(Widthptr)
			s = funcsym(s)
		}

		a.Sym = Linksym(s)

	case ODOT:
		// A special case to make write barriers more efficient.
		// Taking the address of the first field of a named struct
		// is the same as taking the address of the struct.
		if !n.Left.Type.IsStruct() || n.Left.Type.Field(0).Sym != n.Sym {
			Debug['h'] = 1
			Dump("naddr", n)
			Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a))
		}
		Naddr(a, n.Left)

	case OLITERAL:
		if Thearch.LinkArch.Family == sys.I386 {
			a.Width = 0
		}
		switch u := n.Val().U.(type) {
		default:
			Fatalf("naddr: const %v", Tconv(n.Type, FmtLong))

		case *Mpflt:
			a.Type = obj.TYPE_FCONST
			a.Val = u.Float64()

		case *Mpint:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = u.Int64()

		case string:
			datagostring(u, a)

		case bool:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = int64(obj.Bool2int(u))

		case *NilVal:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = 0
		}

	case OADDR:
		Naddr(a, n.Left)
		a.Etype = uint8(Tptr)
		if !Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { // TODO(rsc): Do this even for these architectures.
			a.Width = int64(Widthptr)
		}
		if a.Type != obj.TYPE_MEM {
			a := a // copy to let escape into Ctxt.Dconv
			Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), n.Left.Op)
		}
		a.Type = obj.TYPE_ADDR

	case OITAB:
		// itable of interface value
		Naddr(a, n.Left)
		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // itab(nil)
		}
		a.Etype = uint8(Tptr)
		a.Width = int64(Widthptr)

	case OIDATA:
		// idata of interface value
		Naddr(a, n.Left)
		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // idata(nil)
		}
		if isdirectiface(n.Type) {
			a.Etype = uint8(Simtype[n.Type.Etype])
		} else {
			a.Etype = uint8(Tptr)
		}
		a.Offset += int64(Widthptr)
		a.Width = int64(Widthptr)

		// pointer in a string or slice
	case OSPTR:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // ptr(nil)
		}
		a.Etype = uint8(Simtype[Tptr])
		a.Offset += int64(Array_array)
		a.Width = int64(Widthptr)

		// len of string or slice
	case OLEN:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // len(nil)
		}
		a.Etype = uint8(Simtype[TUINT])
		a.Offset += int64(Array_nel)
		if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}

		// cap of string or slice
	case OCAP:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // cap(nil)
		}
		a.Etype = uint8(Simtype[TUINT])
		a.Offset += int64(Array_cap)
		if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}
	}
}

func newplist() *obj.Plist {
	pl := obj.Linknewplist(Ctxt)

	Pc = Ctxt.NewProg()
	Clearp(Pc)
	pl.Firstpc = Pc

	return pl
}

// nodarg returns a Node for the function argument denoted by t,
// which is either the entire function argument or result struct (t is a struct *Type)
// or a specific argument (t is a *Field within a struct *Type).
//
// If fp is 0, the node is for use by a caller invoking the given
// function, preparing the arguments before the call
// or retrieving the results after the call.
// In this case, the node will correspond to an outgoing argument
// slot like 8(SP).
//
// If fp is 1, the node is for use by the function itself
// (the callee), to retrieve its arguments or write its results.
// In this case the node will be an ONAME with an appropriate
// type and offset.
func nodarg(t interface{}, fp int) *Node {
	var n *Node

	var funarg Funarg
	switch t := t.(type) {
	default:
		Fatalf("bad nodarg %T(%v)", t, t)

	case *Type:
		// Entire argument struct, not just one arg
		if !t.IsFuncArgStruct() {
			Fatalf("nodarg: bad type %v", t)
		}
		funarg = t.StructType().Funarg

		// Build fake variable name for whole arg struct.
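		// The fake ONAME is named ".args", typed as the entire argument
		// struct, and given the offset of the first field, so a single
		// node covers the whole parameter block at once.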
		n = Nod(ONAME, nil, nil)
		n.Sym = Lookup(".args")
		n.Type = t
		first := t.Field(0)
		if first == nil {
			Fatalf("nodarg: bad struct")
		}
		if first.Offset == BADWIDTH {
			Fatalf("nodarg: offset not computed for %v", t)
		}
		n.Xoffset = first.Offset
		n.Addable = true

	case *Field:
		funarg = t.Funarg
		if fp == 1 {
			// NOTE(rsc): This should be using t.Nname directly,
			// except in the case where t.Nname.Sym is the blank symbol and
			// so the assignment would be discarded during code generation.
			// In that case we need to make a new node, and there is no harm
			// in optimization passes to doing so. But otherwise we should
			// definitely be using the actual declaration and not a newly built node.
			// The extra Fatalf checks here are verifying that this is the case,
			// without changing the actual logic (at time of writing, it's getting
			// toward time for the Go 1.7 beta).
			// At some quieter time (assuming we've never seen these Fatalfs happen)
			// we could change this code to use "expect" directly.
			expect := t.Nname
			if expect.isParamHeapCopy() {
				expect = expect.Name.Param.Stackcopy
			}

			for _, n := range Curfn.Func.Dcl {
				if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
					if n != expect {
						Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, t.Nname, t.Nname, t.Nname.Op)
					}
					return n
				}
			}

			if !isblanksym(expect.Sym) {
				Fatalf("nodarg: did not find node in dcl list: %v", expect)
			}
		}

		// Build fake name for individual variable.
		// This is safe because if there was a real declared name
		// we'd have used it above.
		n = Nod(ONAME, nil, nil)
		n.Type = t.Type
		n.Sym = t.Sym
		if t.Offset == BADWIDTH {
			Fatalf("nodarg: offset not computed for %v", t)
		}
		n.Xoffset = t.Offset
		n.Addable = true
		n.Orig = t.Nname
	}

	// Rewrite argument named _ to __,
	// or else the assignment to _ will be
	// discarded during code generation.
	if isblank(n) {
		n.Sym = Lookup("__")
	}

	switch fp {
	default:
		Fatalf("bad fp")

	case 0: // preparing arguments for call
		n.Op = OINDREG
		n.Reg = int16(Thearch.REGSP)
		n.Xoffset += Ctxt.FixedFrameSize()

	case 1: // reading arguments inside call
		n.Class = PPARAM
		if funarg == FunargResults {
			n.Class = PPARAMOUT
		}
	}

	n.Typecheck = 1
	n.Addrtaken = true // keep optimizers at bay
	return n
}

func Patch(p *obj.Prog, to *obj.Prog) {
	if p.To.Type != obj.TYPE_BRANCH {
		Fatalf("patch: not a branch")
	}
	p.To.Val = to
	p.To.Offset = to.Pc
}

func unpatch(p *obj.Prog) *obj.Prog {
	if p.To.Type != obj.TYPE_BRANCH {
		Fatalf("unpatch: not a branch")
	}
	q, _ := p.To.Val.(*obj.Prog)
	p.To.Val = nil
	p.To.Offset = 0
	return q
}

var reg [100]int       // count of references to reg
var regstk [100][]byte // allocation sites, when -v is given

func GetReg(r int) int {
	return reg[r-Thearch.REGMIN]
}
func SetReg(r, v int) {
	reg[r-Thearch.REGMIN] = v
}

func ginit() {
	for r := range reg {
		reg[r] = 1
	}

	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		reg[r-Thearch.REGMIN] = 0
	}
	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
		reg[r-Thearch.REGMIN] = 0
	}

	for _, r := range Thearch.ReservedRegs {
		reg[r-Thearch.REGMIN] = 1
	}
}

func gclean() {
	for _, r := range Thearch.ReservedRegs {
		reg[r-Thearch.REGMIN]--
	}

	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		n := reg[r-Thearch.REGMIN]
		if n != 0 {
			if Debug['v'] != 0 {
				Regdump()
			}
			Yyerror("reg %v left allocated", obj.Rconv(r))
		}
	}

	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
		n := reg[r-Thearch.REGMIN]
		if n != 0 {
			if Debug['v'] != 0 {
				Regdump()
			}
			Yyerror("reg %v left allocated", obj.Rconv(r))
		}
	}
}

func Anyregalloc() bool {
	n := 0
	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		if reg[r-Thearch.REGMIN] == 0 {
			n++
		}
	}
	return n > len(Thearch.ReservedRegs)
}

// allocate register of type t, leave in n.
// if o != N, o may be reusable register.
// caller must Regfree(n).
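//
// Illustrative sketch of assumed usage (src is a placeholder node, not
// taken from this file):
//
//	var r Node
//	Regalloc(&r, Types[TINT32], nil) // r becomes an OREGISTER node for a free register
//	Thearch.Gmove(src, &r)           // use it, e.g. as the destination of a move
//	Regfree(&r)                      // release it so gclean finds nothing left allocated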
func Regalloc(n *Node, t *Type, o *Node) {
	if t == nil {
		Fatalf("regalloc: t nil")
	}
	et := Simtype[t.Etype]
	if Ctxt.Arch.RegSize == 4 && (et == TINT64 || et == TUINT64) {
		Fatalf("regalloc 64bit")
	}

	var i int
Switch:
	switch et {
	default:
		Fatalf("regalloc: unknown type %v", t)

	case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TPTR32, TPTR64, TBOOL:
		if o != nil && o.Op == OREGISTER {
			i = int(o.Reg)
			if Thearch.REGMIN <= i && i <= Thearch.REGMAX {
				break Switch
			}
		}
		for i = Thearch.REGMIN; i <= Thearch.REGMAX; i++ {
			if reg[i-Thearch.REGMIN] == 0 {
				break Switch
			}
		}
		Flusherrors()
		Regdump()
		Fatalf("out of fixed registers")

	case TFLOAT32, TFLOAT64:
		if Thearch.Use387 {
			i = Thearch.FREGMIN // x86.REG_F0
			break Switch
		}
		if o != nil && o.Op == OREGISTER {
			i = int(o.Reg)
			if Thearch.FREGMIN <= i && i <= Thearch.FREGMAX {
				break Switch
			}
		}
		for i = Thearch.FREGMIN; i <= Thearch.FREGMAX; i++ {
			if reg[i-Thearch.REGMIN] == 0 { // note: REGMIN, not FREGMIN
				break Switch
			}
		}
		Flusherrors()
		Regdump()
		Fatalf("out of floating registers")

	case TCOMPLEX64, TCOMPLEX128:
		Tempname(n, t)
		return
	}

	ix := i - Thearch.REGMIN
	if reg[ix] == 0 && Debug['v'] > 0 {
		if regstk[ix] == nil {
			regstk[ix] = make([]byte, 4096)
		}
		stk := regstk[ix]
		n := runtime.Stack(stk[:cap(stk)], false)
		regstk[ix] = stk[:n]
	}
	reg[ix]++
	Nodreg(n, t, i)
}

func Regfree(n *Node) {
	if n.Op == ONAME {
		return
	}
	if n.Op != OREGISTER && n.Op != OINDREG {
		Fatalf("regfree: not a register")
	}
	i := int(n.Reg)
	if i == Thearch.REGSP {
		return
	}
	switch {
	case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
		Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
		// ok
	default:
		Fatalf("regfree: reg out of range")
	}

	i -= Thearch.REGMIN
	if reg[i] <= 0 {
		Fatalf("regfree: reg not allocated")
	}
	reg[i]--
	if reg[i] == 0 {
		regstk[i] = regstk[i][:0]
	}
}

// Reginuse reports whether r is in use.
func Reginuse(r int) bool {
	switch {
	case Thearch.REGMIN <= r && r <= Thearch.REGMAX,
		Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
		// ok
	default:
		Fatalf("reginuse: reg out of range")
	}

	return reg[r-Thearch.REGMIN] > 0
}

// Regrealloc(n) undoes the effect of Regfree(n),
// so that a register can be given up but then reclaimed.
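//
// Sketch of the intended pairing (illustrative only):
//
//	Regfree(&r)    // temporarily give r's register back
//	// ... intervening code during which the register is not held ...
//	Regrealloc(&r) // re-increment its reference count before using r again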
func Regrealloc(n *Node) {
	if n.Op != OREGISTER && n.Op != OINDREG {
		Fatalf("regrealloc: not a register")
	}
	i := int(n.Reg)
	if i == Thearch.REGSP {
		return
	}
	switch {
	case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
		Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
		// ok
	default:
		Fatalf("regrealloc: reg out of range")
	}

	i -= Thearch.REGMIN
	if reg[i] == 0 && Debug['v'] > 0 {
		if regstk[i] == nil {
			regstk[i] = make([]byte, 4096)
		}
		stk := regstk[i]
		n := runtime.Stack(stk[:cap(stk)], false)
		regstk[i] = stk[:n]
	}
	reg[i]++
}

func Regdump() {
	if Debug['v'] == 0 {
		fmt.Printf("run compiler with -v for register allocation sites\n")
		return
	}

	dump := func(r int) {
		stk := regstk[r-Thearch.REGMIN]
		if len(stk) == 0 {
			return
		}
		fmt.Printf("reg %v allocated at:\n", obj.Rconv(r))
		fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
	}

	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		if reg[r-Thearch.REGMIN] != 0 {
			dump(r)
		}
	}
	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
		if reg[r-Thearch.REGMIN] != 0 {
			dump(r)
		}
	}
}
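
// Illustrative sketch (assumed usage, for orientation) of how the register
// bookkeeping above fits together across one function's code generation:
//
//	ginit()                          // reset counts and pin Thearch.ReservedRegs
//	var r Node
//	Regalloc(&r, Types[TUINT8], nil) // pick a free register and bump its count
//	// ... emit instructions that use &r ...
//	Regfree(&r)                      // drop the count; the -v allocation log is cleared at zero
//	gclean()                         // reports any register still left allocated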