github.com/gocuntian/go@v0.0.0-20160610041250-fee02d270bf8/src/cmd/compile/internal/gc/gsubr.go (about) 1 // Derived from Inferno utils/6c/txt.c 2 // http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c 3 // 4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) 6 // Portions Copyright © 1997-1999 Vita Nuova Limited 7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) 8 // Portions Copyright © 2004,2006 Bruce Ellis 9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) 10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others 11 // Portions Copyright © 2009 The Go Authors. All rights reserved. 12 // 13 // Permission is hereby granted, free of charge, to any person obtaining a copy 14 // of this software and associated documentation files (the "Software"), to deal 15 // in the Software without restriction, including without limitation the rights 16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 // copies of the Software, and to permit persons to whom the Software is 18 // furnished to do so, subject to the following conditions: 19 // 20 // The above copyright notice and this permission notice shall be included in 21 // all copies or substantial portions of the Software. 22 // 23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 // THE SOFTWARE. 
package gc

import (
	"cmd/internal/obj"
	"cmd/internal/sys"
	"fmt"
	"runtime"
	"strings"
)

// Deferred AGLOBL data instructions: Prog buffers them on their own
// list (dfirst..dpc) so dumpdata/flushdata can splice them into the
// main instruction list later.
var (
	ddumped bool      // set once dumpdata has run; further AGLOBLs are a fatal error
	dfirst  *obj.Prog // head of the buffered AGLOBL list
	dpc     *obj.Prog // current tail sentinel of the buffered AGLOBL list
)

// Is this node a memory operand?
func Ismem(n *Node) bool {
	switch n.Op {
	case OITAB,
		OSPTR,
		OLEN,
		OCAP,
		OINDREG,
		ONAME,
		OCLOSUREVAR:
		return true

	case OADDR:
		// amd64 and s390x use PC relative addressing.
		// TODO(rsc): not sure why ppc64 needs this too.
		return Thearch.LinkArch.InFamily(sys.AMD64, sys.PPC64, sys.S390X)
	}

	return false
}

// Samereg reports whether a and b are both OREGISTER nodes
// referring to the same machine register.
func Samereg(a *Node, b *Node) bool {
	if a == nil || b == nil {
		return false
	}
	if a.Op != OREGISTER {
		return false
	}
	if b.Op != OREGISTER {
		return false
	}
	if a.Reg != b.Reg {
		return false
	}
	return true
}

// Gbranch emits a new branch instruction with opcode as.
// The branch target is left nil; callers fill it in with Patch.
// For conditional branches, likely > 0 records a branch-likely hint
// as a constant 1 in From (on architectures whose assemblers consume
// the hint; the listed families are skipped).
func Gbranch(as obj.As, t *Type, likely int) *obj.Prog {
	p := Prog(as)
	p.To.Type = obj.TYPE_BRANCH
	p.To.Val = nil
	if as != obj.AJMP && likely != 0 && !Thearch.LinkArch.InFamily(sys.PPC64, sys.ARM64, sys.MIPS64, sys.S390X) {
		p.From.Type = obj.TYPE_CONST
		if likely > 0 {
			p.From.Offset = 1
		}
	}

	if Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	return p
}

// Prog returns a new instruction with opcode as, appended to the
// current instruction list (Pc). AGLOBL data pseudo-instructions are
// instead accumulated on the separate dfirst/dpc list so they can be
// emitted together by dumpdata/flushdata.
func Prog(as obj.As) *obj.Prog {
	var p *obj.Prog

	if as == obj.AGLOBL {
		if ddumped {
			Fatalf("already dumped data")
		}
		if dpc == nil {
			dpc = Ctxt.NewProg()
			dfirst = dpc
		}

		p = dpc
		dpc = Ctxt.NewProg()
		p.Link = dpc
	} else {
		p = Pc
		Pc = Ctxt.NewProg()
		Clearp(Pc)
		p.Link = Pc
	}

	if lineno == 0 && Debug['K'] != 0 {
		Warn("prog: line 0")
	}

	p.As = as
	p.Lineno = lineno
	return p
}

// Nodreg initializes n in place as an OREGISTER node of type t
// referring to register r. Any previous contents of n are discarded.
func Nodreg(n *Node, t *Type, r int) {
	if t == nil {
		Fatalf("nodreg: t nil")
	}

	*n = Node{}
	n.Op = OREGISTER
	n.Addable = true
	ullmancalc(n)
	n.Reg = int16(r)
	n.Type = t
}

// Nodindreg initializes n in place as an OINDREG node of type t —
// a memory operand addressed through register r.
func Nodindreg(n *Node, t *Type, r int) {
	Nodreg(n, t, r)
	n.Op = OINDREG
}

// Afunclit rewrites an address-of-external-function operand
// (TYPE_ADDR/NAME_EXTERN) into a direct memory reference to the
// function's link symbol. Used when n denotes a function literal;
// other operand shapes are left untouched.
func Afunclit(a *obj.Addr, n *Node) {
	if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
		a.Type = obj.TYPE_MEM
		a.Sym = Linksym(n.Sym)
	}
}

// Clearp resets p to an AEND no-op and assigns it the next
// program-counter index from pcloc.
func Clearp(p *obj.Prog) {
	obj.Nopout(p)
	p.As = obj.AEND
	p.Pc = int64(pcloc)
	pcloc++
}

// dumpdata splices the buffered AGLOBL list into a fresh plist and
// marks data emission as finished; any later AGLOBL is fatal (see Prog).
func dumpdata() {
	ddumped = true
	if dfirst == nil {
		return
	}
	newplist()
	*Pc = *dfirst
	Pc = dpc
	Clearp(Pc)
}

// flushdata is like dumpdata but leaves data emission open:
// it drains the buffered AGLOBL list and resets it so more
// AGLOBLs may be accumulated afterward.
func flushdata() {
	if dfirst == nil {
		return
	}
	newplist()
	*Pc = *dfirst
	Pc = dpc
	Clearp(Pc)
	dfirst = nil
	dpc = nil
}

// Fixup instructions after allocauto (formerly compactframe) has moved all autos around.
func fixautoused(p *obj.Prog) {
	// lp trails one link behind p so unused ATYPE pseudo-instructions
	// can be unlinked from the list in place.
	for lp := &p; ; {
		p = *lp
		if p == nil {
			break
		}
		// Drop ATYPE annotations for autos that were never used.
		if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !((p.From.Node).(*Node)).Used {
			*lp = p.Link
			continue
		}

		if (p.As == obj.AVARDEF || p.As == obj.AVARKILL || p.As == obj.AVARLIVE) && p.To.Node != nil && !((p.To.Node).(*Node)).Used {
			// Cannot remove VARDEF instruction, because - unlike TYPE handled above -
			// VARDEFs are interspersed with other code, and a jump might be using the
			// VARDEF as a target. Replace with a no-op instead. A later pass will remove
			// the no-ops.
			obj.Nopout(p)

			continue
		}

		// Apply the frame-offset adjustment computed by allocauto.
		if p.From.Name == obj.NAME_AUTO && p.From.Node != nil {
			p.From.Offset += stkdelta[p.From.Node.(*Node)]
		}

		if p.To.Name == obj.NAME_AUTO && p.To.Node != nil {
			p.To.Offset += stkdelta[p.To.Node.(*Node)]
		}

		lp = &p.Link
	}
}

// ggloblnod emits a GLOBL pseudo-instruction declaring the global
// variable nam, recording its Go type, width, and RODATA/NOPTR flags.
func ggloblnod(nam *Node) {
	p := Thearch.Gins(obj.AGLOBL, nam, nil)
	p.Lineno = nam.Lineno
	p.From.Sym.Gotype = Linksym(ngotype(nam))
	p.To.Sym = nil
	p.To.Type = obj.TYPE_CONST
	p.To.Offset = nam.Type.Width
	p.From3 = new(obj.Addr)
	if nam.Name.Readonly {
		p.From3.Offset = obj.RODATA
	}
	if nam.Type != nil && !haspointers(nam.Type) {
		p.From3.Offset |= obj.NOPTR
	}
}

// ggloblsym is ggloblLSym for a compiler *Sym rather than a linker symbol.
func ggloblsym(s *Sym, width int32, flags int16) {
	ggloblLSym(Linksym(s), width, flags)
}

// ggloblLSym emits a GLOBL pseudo-instruction declaring linker symbol s
// with the given width. The obj.LOCAL bit of flags is applied to the
// symbol itself; the remaining flags are passed through in From3.
func ggloblLSym(s *obj.LSym, width int32, flags int16) {
	p := Thearch.Gins(obj.AGLOBL, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = s
	if flags&obj.LOCAL != 0 {
		p.From.Sym.Local = true
		flags &^= obj.LOCAL
	}
	p.To.Type = obj.TYPE_CONST
	p.To.Offset = int64(width)
	p.From3 = new(obj.Addr)
	p.From3.Offset = int64(flags)
}

// gjmp emits an unconditional jump. If to is non-nil the jump is
// patched to it immediately; otherwise the caller patches it later.
func gjmp(to *obj.Prog) *obj.Prog {
	p := Gbranch(obj.AJMP, nil, 0)
	if to != nil {
		Patch(p, to)
	}
	return p
}

// gtrack emits a USEFIELD pseudo-instruction referencing s
// (field-tracking support).
func gtrack(s *Sym) {
	p := Thearch.Gins(obj.AUSEFIELD, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = Linksym(s)
}

// gused emits a NOP referencing n so that n is considered used.
func gused(n *Node) {
	Thearch.Gins(obj.ANOP, n, nil) // used
}

// Isfat reports whether t is a multi-word ("fat") value type:
// struct, array, slice, string, or interface.
func Isfat(t *Type) bool {
	if t != nil {
		switch t.Etype {
		case TSTRUCT, TARRAY, TSLICE, TSTRING,
			TINTER: // maybe remove later
			return true
		}
	}

	return false
}

// Sweep the prog list to mark any used nodes.
func markautoused(p *obj.Prog) {
	for ; p != nil; p = p.Link {
		// Skip annotation pseudo-instructions: a reference from one of
		// these does not constitute a real use.
		if p.As == obj.ATYPE || p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}

		if p.From.Node != nil {
			((p.From.Node).(*Node)).Used = true
		}

		if p.To.Node != nil {
			((p.To.Node).(*Node)).Used = true
		}
	}
}

// Naddr rewrites a to refer to n.
// It assumes that a is zeroed on entry.
func Naddr(a *obj.Addr, n *Node) {
	if n == nil {
		return
	}

	if n.Type != nil && n.Type.Etype != TIDEAL {
		// TODO(rsc): This is undone by the selective clearing of width below,
		// to match architectures that were not as aggressive in setting width
		// during naddr. Those widths must be cleared to avoid triggering
		// failures in gins when it detects real but heretofore latent (and one
		// hopes innocuous) type mismatches.
		// The type mismatches should be fixed and the clearing below removed.
		dowidth(n.Type)

		a.Width = n.Type.Width
	}

	switch n.Op {
	default:
		a := a // copy to let escape into Ctxt.Dconv
		Debug['h'] = 1
		Dump("naddr", n)
		Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a))

	case OREGISTER:
		a.Type = obj.TYPE_REG
		a.Reg = n.Reg
		a.Sym = nil
		if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OINDREG:
		a.Type = obj.TYPE_MEM
		a.Reg = n.Reg
		a.Sym = Linksym(n.Sym)
		a.Offset = n.Xoffset
		if a.Offset != int64(int32(a.Offset)) {
			Yyerror("offset %d too large for OINDREG", a.Offset)
		}
		if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OCLOSUREVAR:
		// Closure variables live at fixed offsets from the context
		// register, so the enclosing function must carry a context.
		if !Curfn.Func.Needctxt {
			Fatalf("closurevar without needctxt")
		}
		a.Type = obj.TYPE_MEM
		a.Reg = int16(Thearch.REGCTXT)
		a.Sym = nil
		a.Offset = n.Xoffset

	case OCFUNC:
		Naddr(a, n.Left)
		a.Sym = Linksym(n.Left.Sym)

	case ONAME:
		a.Etype = 0
		if n.Type != nil {
			a.Etype = uint8(Simtype[n.Type.Etype])
		}
		a.Offset = n.Xoffset
		s := n.Sym
		a.Node = n.Orig

		//if(a->node >= (Node*)&n)
		//	fatal("stack node");
		if s == nil {
			s = Lookup(".noname")
		}
		if n.Name.Method && n.Type != nil && n.Type.Sym != nil && n.Type.Sym.Pkg != nil {
			s = Pkglookup(s.Name, n.Type.Sym.Pkg)
		}

		a.Type = obj.TYPE_MEM
		switch n.Class {
		default:
			Fatalf("naddr: ONAME class %v %d\n", n.Sym, n.Class)

		case PEXTERN:
			a.Name = obj.NAME_EXTERN

		case PAUTO:
			a.Name = obj.NAME_AUTO

		case PPARAM, PPARAMOUT:
			a.Name = obj.NAME_PARAM

		case PFUNC:
			// A reference to a function is its address, not its contents.
			a.Name = obj.NAME_EXTERN
			a.Type = obj.TYPE_ADDR
			a.Width = int64(Widthptr)
			s = funcsym(s)
		}

		a.Sym = Linksym(s)

	case ODOT:
		// A special case to make write barriers more efficient.
		// Taking the address of the first field of a named struct
		// is the same as taking the address of the struct.
		if !n.Left.Type.IsStruct() || n.Left.Type.Field(0).Sym != n.Sym {
			Debug['h'] = 1
			Dump("naddr", n)
			Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a))
		}
		Naddr(a, n.Left)

	case OLITERAL:
		if Thearch.LinkArch.Family == sys.I386 {
			a.Width = 0
		}
		switch u := n.Val().U.(type) {
		default:
			Fatalf("naddr: const %v", Tconv(n.Type, FmtLong))

		case *Mpflt:
			a.Type = obj.TYPE_FCONST
			a.Val = u.Float64()

		case *Mpint:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = u.Int64()

		case string:
			datagostring(u, a)

		case bool:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = int64(obj.Bool2int(u))

		case *NilVal:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = 0
		}

	case OADDR:
		Naddr(a, n.Left)
		a.Etype = uint8(Tptr)
		if !Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { // TODO(rsc): Do this even for these architectures.
			a.Width = int64(Widthptr)
		}
		if a.Type != obj.TYPE_MEM {
			a := a // copy to let escape into Ctxt.Dconv
			Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), n.Left.Op)
		}
		a.Type = obj.TYPE_ADDR

	// itable of interface value
	case OITAB:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // itab(nil)
		}
		a.Etype = uint8(Tptr)
		a.Width = int64(Widthptr)

	// pointer in a string or slice
	case OSPTR:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // ptr(nil)
		}
		a.Etype = uint8(Simtype[Tptr])
		a.Offset += int64(Array_array)
		a.Width = int64(Widthptr)

	// len of string or slice
	case OLEN:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // len(nil)
		}
		a.Etype = uint8(Simtype[TUINT])
		a.Offset += int64(Array_nel)
		if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}

	// cap of string or slice
	case OCAP:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // cap(nil)
		}
		a.Etype = uint8(Simtype[TUINT])
		a.Offset += int64(Array_cap)
		if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}
	}
}

// newplist starts a fresh obj.Plist, resets the global Pc to a new
// cleared instruction, and returns the plist.
func newplist() *obj.Plist {
	pl := obj.Linknewplist(Ctxt)

	Pc = Ctxt.NewProg()
	Clearp(Pc)
	pl.Firstpc = Pc

	return pl
}

// nodarg returns a Node for the function argument denoted by t,
// which is either the entire function argument or result struct (t is a struct *Type)
// or a specific argument (t is a *Field within a struct *Type).
//
// If fp is 0, the node is for use by a caller invoking the given
// function, preparing the arguments before the call
// or retrieving the results after the call.
// In this case, the node will correspond to an outgoing argument
// slot like 8(SP).
//
// If fp is 1, the node is for use by the function itself
// (the callee), to retrieve its arguments or write its results.
// In this case the node will be an ONAME with an appropriate
// type and offset.
func nodarg(t interface{}, fp int) *Node {
	var n *Node

	var funarg Funarg
	switch t := t.(type) {
	default:
		Fatalf("bad nodarg %T(%v)", t, t)

	case *Type:
		// Entire argument struct, not just one arg
		if !t.IsFuncArgStruct() {
			Fatalf("nodarg: bad type %v", t)
		}
		funarg = t.StructType().Funarg

		// Build fake variable name for whole arg struct.
		n = Nod(ONAME, nil, nil)
		n.Sym = Lookup(".args")
		n.Type = t
		first := t.Field(0)
		if first == nil {
			Fatalf("nodarg: bad struct")
		}
		if first.Offset == BADWIDTH {
			Fatalf("nodarg: offset not computed for %v", t)
		}
		n.Xoffset = first.Offset
		n.Addable = true

	case *Field:
		funarg = t.Funarg
		if fp == 1 {
			// NOTE(rsc): This should be using t.Nname directly,
			// except in the case where t.Nname.Sym is the blank symbol and
			// so the assignment would be discarded during code generation.
			// In that case we need to make a new node, and there is no harm
			// in optimization passes to doing so. But otherwise we should
			// definitely be using the actual declaration and not a newly built node.
			// The extra Fatalf checks here are verifying that this is the case,
			// without changing the actual logic (at time of writing, it's getting
			// toward time for the Go 1.7 beta).
			// At some quieter time (assuming we've never seen these Fatalfs happen)
			// we could change this code to use "expect" directly.
			expect := t.Nname
			if expect.isParamHeapCopy() {
				expect = expect.Name.Param.Stackcopy
			}

			for _, n := range Curfn.Func.Dcl {
				if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
					if n != expect {
						Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, t.Nname, t.Nname, t.Nname.Op)
					}
					return n
				}
			}

			if !isblanksym(expect.Sym) {
				Fatalf("nodarg: did not find node in dcl list: %v", expect)
			}
		}

		// Build fake name for individual variable.
		// This is safe because if there was a real declared name
		// we'd have used it above.
		n = Nod(ONAME, nil, nil)
		n.Type = t.Type
		n.Sym = t.Sym
		if t.Offset == BADWIDTH {
			Fatalf("nodarg: offset not computed for %v", t)
		}
		n.Xoffset = t.Offset
		n.Addable = true
		n.Orig = t.Nname
	}

	// Rewrite argument named _ to __,
	// or else the assignment to _ will be
	// discarded during code generation.
	if isblank(n) {
		n.Sym = Lookup("__")
	}

	switch fp {
	default:
		Fatalf("bad fp")

	case 0: // preparing arguments for call
		n.Op = OINDREG
		n.Reg = int16(Thearch.REGSP)
		n.Xoffset += Ctxt.FixedFrameSize()

	case 1: // reading arguments inside call
		n.Class = PPARAM
		if funarg == FunargResults {
			n.Class = PPARAMOUT
		}
	}

	n.Typecheck = 1
	n.Addrtaken = true // keep optimizers at bay
	return n
}

// Patch sets the branch instruction p to jump to instruction to.
func Patch(p *obj.Prog, to *obj.Prog) {
	if p.To.Type != obj.TYPE_BRANCH {
		Fatalf("patch: not a branch")
	}
	p.To.Val = to
	p.To.Offset = to.Pc
}

// unpatch clears the target of branch p and returns the previous
// target (nil if none was set).
func unpatch(p *obj.Prog) *obj.Prog {
	if p.To.Type != obj.TYPE_BRANCH {
		Fatalf("unpatch: not a branch")
	}
	q, _ := p.To.Val.(*obj.Prog)
	p.To.Val = nil
	p.To.Offset = 0
	return q
}

// Reference counts for machine registers, indexed by r-Thearch.REGMIN.
// Both integer and floating registers share this array; see the
// "note: REGMIN, not FREGMIN" comment in Regalloc.
var reg [100]int       // count of references to reg
var regstk [100][]byte // allocation sites, when -v is given

// GetReg returns the reference count of register r.
func GetReg(r int) int {
	return reg[r-Thearch.REGMIN]
}

// SetReg sets the reference count of register r to v.
func SetReg(r, v int) {
	reg[r-Thearch.REGMIN] = v
}

// ginit initializes the register reference counts: every slot starts
// at 1 (unallocatable), the usable integer and floating ranges are
// reset to 0 (free), and reserved registers are pinned back at 1.
func ginit() {
	for r := range reg {
		reg[r] = 1
	}

	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		reg[r-Thearch.REGMIN] = 0
	}
	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
		reg[r-Thearch.REGMIN] = 0
	}

	for _, r := range Thearch.ReservedRegs {
		reg[r-Thearch.REGMIN] = 1
	}
}

// gclean releases the reserved registers and reports (via Yyerror)
// any register whose reference count is still nonzero — i.e. a
// Regalloc without a matching Regfree.
func gclean() {
	for _, r := range Thearch.ReservedRegs {
		reg[r-Thearch.REGMIN]--
	}

	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		n := reg[r-Thearch.REGMIN]
		if n != 0 {
			if Debug['v'] != 0 {
				Regdump()
			}
			Yyerror("reg %v left allocated", obj.Rconv(r))
		}
	}

	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
		n := reg[r-Thearch.REGMIN]
		if n != 0 {
			if Debug['v'] != 0 {
				Regdump()
			}
			Yyerror("reg %v left allocated", obj.Rconv(r))
		}
	}
}

// Anyregalloc reports whether any integer register is currently
// allocated, by comparing the number of free slots against the
// reserved-register count. NOTE(review): it counts *free* (== 0)
// slots, which looks inverted but matches the long-standing upstream
// code — confirm against callers before changing.
func Anyregalloc() bool {
	n := 0
	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		if reg[r-Thearch.REGMIN] == 0 {
			n++
		}
	}
	return n > len(Thearch.ReservedRegs)
}

// allocate register of type t, leave in n.
// if o != N, o may be reusable register.
// caller must Regfree(n).
func Regalloc(n *Node, t *Type, o *Node) {
	if t == nil {
		Fatalf("regalloc: t nil")
	}
	et := Simtype[t.Etype]
	if Ctxt.Arch.RegSize == 4 && (et == TINT64 || et == TUINT64) {
		Fatalf("regalloc 64bit")
	}

	var i int
Switch:
	switch et {
	default:
		Fatalf("regalloc: unknown type %v", t)

	case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TPTR32, TPTR64, TBOOL:
		// Prefer reusing the suggested register o when it is a
		// valid integer register.
		if o != nil && o.Op == OREGISTER {
			i = int(o.Reg)
			if Thearch.REGMIN <= i && i <= Thearch.REGMAX {
				break Switch
			}
		}
		for i = Thearch.REGMIN; i <= Thearch.REGMAX; i++ {
			if reg[i-Thearch.REGMIN] == 0 {
				break Switch
			}
		}
		Flusherrors()
		Regdump()
		Fatalf("out of fixed registers")

	case TFLOAT32, TFLOAT64:
		if Thearch.Use387 {
			i = Thearch.FREGMIN // x86.REG_F0
			break Switch
		}
		if o != nil && o.Op == OREGISTER {
			i = int(o.Reg)
			if Thearch.FREGMIN <= i && i <= Thearch.FREGMAX {
				break Switch
			}
		}
		for i = Thearch.FREGMIN; i <= Thearch.FREGMAX; i++ {
			if reg[i-Thearch.REGMIN] == 0 { // note: REGMIN, not FREGMIN
				break Switch
			}
		}
		Flusherrors()
		Regdump()
		Fatalf("out of floating registers")

	case TCOMPLEX64, TCOMPLEX128:
		// Complex values do not fit in a single register;
		// fall back to a stack temporary.
		Tempname(n, t)
		return
	}

	ix := i - Thearch.REGMIN
	if reg[ix] == 0 && Debug['v'] > 0 {
		// First allocation of this register: record the call stack
		// so Regdump can show the allocation site.
		if regstk[ix] == nil {
			regstk[ix] = make([]byte, 4096)
		}
		stk := regstk[ix]
		n := runtime.Stack(stk[:cap(stk)], false)
		regstk[ix] = stk[:n]
	}
	reg[ix]++
	Nodreg(n, t, i)
}

// Regfree releases the register held by n (an OREGISTER or OINDREG
// node), decrementing its reference count. ONAME nodes and the stack
// pointer are silently ignored.
func Regfree(n *Node) {
	if n.Op == ONAME {
		return
	}
	if n.Op != OREGISTER && n.Op != OINDREG {
		Fatalf("regfree: not a register")
	}
	i := int(n.Reg)
	if i == Thearch.REGSP {
		return
	}
	switch {
	case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
		Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
		// ok
	default:
		Fatalf("regfree: reg out of range")
	}

	i -= Thearch.REGMIN
	if reg[i] <= 0 {
		Fatalf("regfree: reg not allocated")
	}
	reg[i]--
	if reg[i] == 0 {
		// Fully released: drop the recorded allocation site.
		regstk[i] = regstk[i][:0]
	}
}

// Reginuse reports whether r is in use.
func Reginuse(r int) bool {
	switch {
	case Thearch.REGMIN <= r && r <= Thearch.REGMAX,
		Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
		// ok
	default:
		Fatalf("reginuse: reg out of range")
	}

	return reg[r-Thearch.REGMIN] > 0
}

// Regrealloc(n) undoes the effect of Regfree(n),
// so that a register can be given up but then reclaimed.
833 func Regrealloc(n *Node) { 834 if n.Op != OREGISTER && n.Op != OINDREG { 835 Fatalf("regrealloc: not a register") 836 } 837 i := int(n.Reg) 838 if i == Thearch.REGSP { 839 return 840 } 841 switch { 842 case Thearch.REGMIN <= i && i <= Thearch.REGMAX, 843 Thearch.FREGMIN <= i && i <= Thearch.FREGMAX: 844 // ok 845 default: 846 Fatalf("regrealloc: reg out of range") 847 } 848 849 i -= Thearch.REGMIN 850 if reg[i] == 0 && Debug['v'] > 0 { 851 if regstk[i] == nil { 852 regstk[i] = make([]byte, 4096) 853 } 854 stk := regstk[i] 855 n := runtime.Stack(stk[:cap(stk)], false) 856 regstk[i] = stk[:n] 857 } 858 reg[i]++ 859 } 860 861 func Regdump() { 862 if Debug['v'] == 0 { 863 fmt.Printf("run compiler with -v for register allocation sites\n") 864 return 865 } 866 867 dump := func(r int) { 868 stk := regstk[r-Thearch.REGMIN] 869 if len(stk) == 0 { 870 return 871 } 872 fmt.Printf("reg %v allocated at:\n", obj.Rconv(r)) 873 fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1)) 874 } 875 876 for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ { 877 if reg[r-Thearch.REGMIN] != 0 { 878 dump(r) 879 } 880 } 881 for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ { 882 if reg[r-Thearch.REGMIN] == 0 { 883 dump(r) 884 } 885 } 886 }