// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova.
//
//	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
//	Portions Copyright © 1997-1999 Vita Nuova Limited
//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
//	Portions Copyright © 2004,2006 Bruce Ellis
//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
//	Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
29 30 package mips 31 32 import ( 33 "github.com/bir3/gocompiler/src/cmd/internal/obj" 34 "github.com/bir3/gocompiler/src/cmd/internal/sys" 35 "encoding/binary" 36 "fmt" 37 "github.com/bir3/gocompiler/src/internal/abi" 38 "log" 39 "math" 40 ) 41 42 func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { 43 c := ctxt0{ctxt: ctxt, newprog: newprog} 44 45 p.From.Class = 0 46 p.To.Class = 0 47 48 // Rewrite JMP/JAL to symbol as TYPE_BRANCH. 49 switch p.As { 50 case AJMP, 51 AJAL, 52 ARET, 53 obj.ADUFFZERO, 54 obj.ADUFFCOPY: 55 if p.To.Sym != nil { 56 p.To.Type = obj.TYPE_BRANCH 57 } 58 } 59 60 // Rewrite float constants to values stored in memory. 61 switch p.As { 62 case AMOVF: 63 if p.From.Type == obj.TYPE_FCONST { 64 f32 := float32(p.From.Val.(float64)) 65 if math.Float32bits(f32) == 0 { 66 p.As = AMOVW 67 p.From.Type = obj.TYPE_REG 68 p.From.Reg = REGZERO 69 break 70 } 71 p.From.Type = obj.TYPE_MEM 72 p.From.Sym = ctxt.Float32Sym(f32) 73 p.From.Name = obj.NAME_EXTERN 74 p.From.Offset = 0 75 } 76 77 case AMOVD: 78 if p.From.Type == obj.TYPE_FCONST { 79 f64 := p.From.Val.(float64) 80 if math.Float64bits(f64) == 0 && c.ctxt.Arch.Family == sys.MIPS64 { 81 p.As = AMOVV 82 p.From.Type = obj.TYPE_REG 83 p.From.Reg = REGZERO 84 break 85 } 86 p.From.Type = obj.TYPE_MEM 87 p.From.Sym = ctxt.Float64Sym(f64) 88 p.From.Name = obj.NAME_EXTERN 89 p.From.Offset = 0 90 } 91 92 // Put >32-bit constants in memory and load them 93 case AMOVV: 94 if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset { 95 p.From.Type = obj.TYPE_MEM 96 p.From.Sym = ctxt.Int64Sym(p.From.Offset) 97 p.From.Name = obj.NAME_EXTERN 98 p.From.Offset = 0 99 } 100 } 101 102 // Rewrite SUB constants into ADD. 
103 switch p.As { 104 case ASUB: 105 if p.From.Type == obj.TYPE_CONST { 106 p.From.Offset = -p.From.Offset 107 p.As = AADD 108 } 109 110 case ASUBU: 111 if p.From.Type == obj.TYPE_CONST { 112 p.From.Offset = -p.From.Offset 113 p.As = AADDU 114 } 115 116 case ASUBV: 117 if p.From.Type == obj.TYPE_CONST { 118 p.From.Offset = -p.From.Offset 119 p.As = AADDV 120 } 121 122 case ASUBVU: 123 if p.From.Type == obj.TYPE_CONST { 124 p.From.Offset = -p.From.Offset 125 p.As = AADDVU 126 } 127 } 128 } 129 130 func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { 131 // TODO(minux): add morestack short-cuts with small fixed frame-size. 132 c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym} 133 134 // a switch for enabling/disabling instruction scheduling 135 nosched := true 136 137 if c.cursym.Func().Text == nil || c.cursym.Func().Text.Link == nil { 138 return 139 } 140 141 p := c.cursym.Func().Text 142 textstksiz := p.To.Offset 143 if textstksiz == -ctxt.Arch.FixedFrameSize { 144 // Historical way to mark NOFRAME. 
145 p.From.Sym.Set(obj.AttrNoFrame, true) 146 textstksiz = 0 147 } 148 if textstksiz < 0 { 149 c.ctxt.Diag("negative frame size %d - did you mean NOFRAME?", textstksiz) 150 } 151 if p.From.Sym.NoFrame() { 152 if textstksiz != 0 { 153 c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) 154 } 155 } 156 157 c.cursym.Func().Args = p.To.Val.(int32) 158 c.cursym.Func().Locals = int32(textstksiz) 159 160 /* 161 * find leaf subroutines 162 * expand RET 163 * expand BECOME pseudo 164 */ 165 166 for p := c.cursym.Func().Text; p != nil; p = p.Link { 167 switch p.As { 168 /* too hard, just leave alone */ 169 case obj.ATEXT: 170 p.Mark |= LABEL | LEAF | SYNC 171 if p.Link != nil { 172 p.Link.Mark |= LABEL 173 } 174 175 /* too hard, just leave alone */ 176 case AMOVW, 177 AMOVV: 178 if p.To.Type == obj.TYPE_REG && p.To.Reg >= REG_SPECIAL { 179 p.Mark |= LABEL | SYNC 180 break 181 } 182 if p.From.Type == obj.TYPE_REG && p.From.Reg >= REG_SPECIAL { 183 p.Mark |= LABEL | SYNC 184 } 185 186 /* too hard, just leave alone */ 187 case ASYSCALL, 188 AWORD, 189 ATLBWR, 190 ATLBWI, 191 ATLBP, 192 ATLBR: 193 p.Mark |= LABEL | SYNC 194 195 case ANOR: 196 if p.To.Type == obj.TYPE_REG { 197 if p.To.Reg == REGZERO { 198 p.Mark |= LABEL | SYNC 199 } 200 } 201 202 case ABGEZAL, 203 ABLTZAL, 204 AJAL, 205 obj.ADUFFZERO, 206 obj.ADUFFCOPY: 207 c.cursym.Func().Text.Mark &^= LEAF 208 fallthrough 209 210 case AJMP, 211 ABEQ, 212 ABGEZ, 213 ABGTZ, 214 ABLEZ, 215 ABLTZ, 216 ABNE, 217 ABFPT, ABFPF: 218 if p.As == ABFPT || p.As == ABFPF { 219 // We don't treat ABFPT and ABFPF as branches here, 220 // so that we will always fill nop (0x0) in their 221 // delay slot during assembly. 222 // This is to workaround a kernel FPU emulator bug 223 // where it uses the user stack to simulate the 224 // instruction in the delay slot if it's not 0x0, 225 // and somehow that leads to SIGSEGV when the kernel 226 // jump to the stack. 
227 p.Mark |= SYNC 228 } else { 229 p.Mark |= BRANCH 230 } 231 q1 := p.To.Target() 232 if q1 != nil { 233 for q1.As == obj.ANOP { 234 q1 = q1.Link 235 p.To.SetTarget(q1) 236 } 237 238 if q1.Mark&LEAF == 0 { 239 q1.Mark |= LABEL 240 } 241 } 242 //else { 243 // p.Mark |= LABEL 244 //} 245 q1 = p.Link 246 if q1 != nil { 247 q1.Mark |= LABEL 248 } 249 250 case ARET: 251 if p.Link != nil { 252 p.Link.Mark |= LABEL 253 } 254 } 255 } 256 257 var mov, add obj.As 258 if c.ctxt.Arch.Family == sys.MIPS64 { 259 add = AADDV 260 mov = AMOVV 261 } else { 262 add = AADDU 263 mov = AMOVW 264 } 265 266 var q *obj.Prog 267 var q1 *obj.Prog 268 autosize := int32(0) 269 var p1 *obj.Prog 270 var p2 *obj.Prog 271 for p := c.cursym.Func().Text; p != nil; p = p.Link { 272 o := p.As 273 switch o { 274 case obj.ATEXT: 275 autosize = int32(textstksiz) 276 277 if p.Mark&LEAF != 0 && autosize == 0 { 278 // A leaf function with no locals has no frame. 279 p.From.Sym.Set(obj.AttrNoFrame, true) 280 } 281 282 if !p.From.Sym.NoFrame() { 283 // If there is a stack frame at all, it includes 284 // space to save the LR. 285 autosize += int32(c.ctxt.Arch.FixedFrameSize) 286 } 287 288 if autosize&4 != 0 && c.ctxt.Arch.Family == sys.MIPS64 { 289 autosize += 4 290 } 291 292 if autosize == 0 && c.cursym.Func().Text.Mark&LEAF == 0 { 293 if c.cursym.Func().Text.From.Sym.NoSplit() { 294 if ctxt.Debugvlog { 295 ctxt.Logf("save suppressed in: %s\n", c.cursym.Name) 296 } 297 298 c.cursym.Func().Text.Mark |= LEAF 299 } 300 } 301 302 p.To.Offset = int64(autosize) - ctxt.Arch.FixedFrameSize 303 304 if c.cursym.Func().Text.Mark&LEAF != 0 { 305 c.cursym.Set(obj.AttrLeaf, true) 306 if p.From.Sym.NoFrame() { 307 break 308 } 309 } 310 311 if !p.From.Sym.NoSplit() { 312 p = c.stacksplit(p, autosize) // emit split check 313 } 314 315 q = p 316 317 if autosize != 0 { 318 // Make sure to save link register for non-empty frame, even if 319 // it is a leaf function, so that traceback works. 
320 // Store link register before decrement SP, so if a signal comes 321 // during the execution of the function prologue, the traceback 322 // code will not see a half-updated stack frame. 323 // This sequence is not async preemptible, as if we open a frame 324 // at the current SP, it will clobber the saved LR. 325 q = c.ctxt.StartUnsafePoint(q, c.newprog) 326 327 q = obj.Appendp(q, newprog) 328 q.As = mov 329 q.Pos = p.Pos 330 q.From.Type = obj.TYPE_REG 331 q.From.Reg = REGLINK 332 q.To.Type = obj.TYPE_MEM 333 q.To.Offset = int64(-autosize) 334 q.To.Reg = REGSP 335 336 q = obj.Appendp(q, newprog) 337 q.As = add 338 q.Pos = p.Pos 339 q.From.Type = obj.TYPE_CONST 340 q.From.Offset = int64(-autosize) 341 q.To.Type = obj.TYPE_REG 342 q.To.Reg = REGSP 343 q.Spadj = +autosize 344 345 q = c.ctxt.EndUnsafePoint(q, c.newprog, -1) 346 347 // On Linux, in a cgo binary we may get a SIGSETXID signal early on 348 // before the signal stack is set, as glibc doesn't allow us to block 349 // SIGSETXID. So a signal may land on the current stack and clobber 350 // the content below the SP. We store the LR again after the SP is 351 // decremented. 352 q = obj.Appendp(q, newprog) 353 q.As = mov 354 q.Pos = p.Pos 355 q.From.Type = obj.TYPE_REG 356 q.From.Reg = REGLINK 357 q.To.Type = obj.TYPE_MEM 358 q.To.Offset = 0 359 q.To.Reg = REGSP 360 } 361 362 if c.cursym.Func().Text.From.Sym.Wrapper() && c.cursym.Func().Text.Mark&LEAF == 0 { 363 // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame 364 // 365 // MOV g_panic(g), R1 366 // BEQ R1, end 367 // MOV panic_argp(R1), R2 368 // ADD $(autosize+FIXED_FRAME), R29, R3 369 // BNE R2, R3, end 370 // ADD $FIXED_FRAME, R29, R2 371 // MOV R2, panic_argp(R1) 372 // end: 373 // NOP 374 // 375 // The NOP is needed to give the jumps somewhere to land. 376 // It is a liblink NOP, not an mips NOP: it encodes to 0 instruction bytes. 
377 // 378 // We don't generate this for leafs because that means the wrapped 379 // function was inlined into the wrapper. 380 381 q = obj.Appendp(q, newprog) 382 383 q.As = mov 384 q.From.Type = obj.TYPE_MEM 385 q.From.Reg = REGG 386 q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic 387 q.To.Type = obj.TYPE_REG 388 q.To.Reg = REG_R1 389 390 q = obj.Appendp(q, newprog) 391 q.As = ABEQ 392 q.From.Type = obj.TYPE_REG 393 q.From.Reg = REG_R1 394 q.To.Type = obj.TYPE_BRANCH 395 q.Mark |= BRANCH 396 p1 = q 397 398 q = obj.Appendp(q, newprog) 399 q.As = mov 400 q.From.Type = obj.TYPE_MEM 401 q.From.Reg = REG_R1 402 q.From.Offset = 0 // Panic.argp 403 q.To.Type = obj.TYPE_REG 404 q.To.Reg = REG_R2 405 406 q = obj.Appendp(q, newprog) 407 q.As = add 408 q.From.Type = obj.TYPE_CONST 409 q.From.Offset = int64(autosize) + ctxt.Arch.FixedFrameSize 410 q.Reg = REGSP 411 q.To.Type = obj.TYPE_REG 412 q.To.Reg = REG_R3 413 414 q = obj.Appendp(q, newprog) 415 q.As = ABNE 416 q.From.Type = obj.TYPE_REG 417 q.From.Reg = REG_R2 418 q.Reg = REG_R3 419 q.To.Type = obj.TYPE_BRANCH 420 q.Mark |= BRANCH 421 p2 = q 422 423 q = obj.Appendp(q, newprog) 424 q.As = add 425 q.From.Type = obj.TYPE_CONST 426 q.From.Offset = ctxt.Arch.FixedFrameSize 427 q.Reg = REGSP 428 q.To.Type = obj.TYPE_REG 429 q.To.Reg = REG_R2 430 431 q = obj.Appendp(q, newprog) 432 q.As = mov 433 q.From.Type = obj.TYPE_REG 434 q.From.Reg = REG_R2 435 q.To.Type = obj.TYPE_MEM 436 q.To.Reg = REG_R1 437 q.To.Offset = 0 // Panic.argp 438 439 q = obj.Appendp(q, newprog) 440 441 q.As = obj.ANOP 442 p1.To.SetTarget(q) 443 p2.To.SetTarget(q) 444 } 445 446 case ARET: 447 if p.From.Type == obj.TYPE_CONST { 448 ctxt.Diag("using BECOME (%v) is not supported!", p) 449 break 450 } 451 452 retSym := p.To.Sym 453 p.To.Name = obj.NAME_NONE // clear fields as we may modify p to other instruction 454 p.To.Sym = nil 455 456 if c.cursym.Func().Text.Mark&LEAF != 0 { 457 if autosize == 0 { 458 p.As = AJMP 459 p.From = obj.Addr{} 460 if 
retSym != nil { // retjmp 461 p.To.Type = obj.TYPE_BRANCH 462 p.To.Name = obj.NAME_EXTERN 463 p.To.Sym = retSym 464 } else { 465 p.To.Type = obj.TYPE_MEM 466 p.To.Reg = REGLINK 467 p.To.Offset = 0 468 } 469 p.Mark |= BRANCH 470 break 471 } 472 473 p.As = add 474 p.From.Type = obj.TYPE_CONST 475 p.From.Offset = int64(autosize) 476 p.To.Type = obj.TYPE_REG 477 p.To.Reg = REGSP 478 p.Spadj = -autosize 479 480 q = c.newprog() 481 q.As = AJMP 482 q.Pos = p.Pos 483 if retSym != nil { // retjmp 484 q.To.Type = obj.TYPE_BRANCH 485 q.To.Name = obj.NAME_EXTERN 486 q.To.Sym = retSym 487 } else { 488 q.To.Type = obj.TYPE_MEM 489 q.To.Reg = REGLINK 490 q.To.Offset = 0 491 } 492 q.Mark |= BRANCH 493 q.Spadj = +autosize 494 495 q.Link = p.Link 496 p.Link = q 497 break 498 } 499 500 p.As = mov 501 p.From.Type = obj.TYPE_MEM 502 p.From.Offset = 0 503 p.From.Reg = REGSP 504 p.To.Type = obj.TYPE_REG 505 p.To.Reg = REGLINK 506 507 if autosize != 0 { 508 q = c.newprog() 509 q.As = add 510 q.Pos = p.Pos 511 q.From.Type = obj.TYPE_CONST 512 q.From.Offset = int64(autosize) 513 q.To.Type = obj.TYPE_REG 514 q.To.Reg = REGSP 515 q.Spadj = -autosize 516 517 q.Link = p.Link 518 p.Link = q 519 } 520 521 q1 = c.newprog() 522 q1.As = AJMP 523 q1.Pos = p.Pos 524 if retSym != nil { // retjmp 525 q1.To.Type = obj.TYPE_BRANCH 526 q1.To.Name = obj.NAME_EXTERN 527 q1.To.Sym = retSym 528 } else { 529 q1.To.Type = obj.TYPE_MEM 530 q1.To.Offset = 0 531 q1.To.Reg = REGLINK 532 } 533 q1.Mark |= BRANCH 534 q1.Spadj = +autosize 535 536 q1.Link = q.Link 537 q.Link = q1 538 539 case AADD, 540 AADDU, 541 AADDV, 542 AADDVU: 543 if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { 544 p.Spadj = int32(-p.From.Offset) 545 } 546 547 case obj.AGETCALLERPC: 548 if cursym.Leaf() { 549 /* MOV LR, Rd */ 550 p.As = mov 551 p.From.Type = obj.TYPE_REG 552 p.From.Reg = REGLINK 553 } else { 554 /* MOV (RSP), Rd */ 555 p.As = mov 556 p.From.Type = obj.TYPE_MEM 557 p.From.Reg = REGSP 558 } 559 } 
560 561 if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 { 562 f := c.cursym.Func() 563 if f.FuncFlag&abi.FuncFlagSPWrite == 0 { 564 c.cursym.Func().FuncFlag |= abi.FuncFlagSPWrite 565 if ctxt.Debugvlog || !ctxt.IsAsm { 566 ctxt.Logf("auto-SPWRITE: %s %v\n", c.cursym.Name, p) 567 if !ctxt.IsAsm { 568 ctxt.Diag("invalid auto-SPWRITE in non-assembly") 569 ctxt.DiagFlush() 570 log.Fatalf("bad SPWRITE") 571 } 572 } 573 } 574 } 575 } 576 577 if c.ctxt.Arch.Family == sys.MIPS { 578 // rewrite MOVD into two MOVF in 32-bit mode to avoid unaligned memory access 579 for p = c.cursym.Func().Text; p != nil; p = p1 { 580 p1 = p.Link 581 582 if p.As != AMOVD { 583 continue 584 } 585 if p.From.Type != obj.TYPE_MEM && p.To.Type != obj.TYPE_MEM { 586 continue 587 } 588 589 p.As = AMOVF 590 q = c.newprog() 591 *q = *p 592 q.Link = p.Link 593 p.Link = q 594 p1 = q.Link 595 596 var addrOff int64 597 if c.ctxt.Arch.ByteOrder == binary.BigEndian { 598 addrOff = 4 // swap load/save order 599 } 600 if p.From.Type == obj.TYPE_MEM { 601 reg := REG_F0 + (p.To.Reg-REG_F0)&^1 602 p.To.Reg = reg 603 q.To.Reg = reg + 1 604 p.From.Offset += addrOff 605 q.From.Offset += 4 - addrOff 606 } else if p.To.Type == obj.TYPE_MEM { 607 reg := REG_F0 + (p.From.Reg-REG_F0)&^1 608 p.From.Reg = reg 609 q.From.Reg = reg + 1 610 p.To.Offset += addrOff 611 q.To.Offset += 4 - addrOff 612 } 613 } 614 } 615 616 if nosched { 617 // if we don't do instruction scheduling, simply add 618 // NOP after each branch instruction. 
619 for p = c.cursym.Func().Text; p != nil; p = p.Link { 620 if p.Mark&BRANCH != 0 { 621 c.addnop(p) 622 } 623 } 624 return 625 } 626 627 // instruction scheduling 628 q = nil // p - 1 629 q1 = c.cursym.Func().Text // top of block 630 o := 0 // count of instructions 631 for p = c.cursym.Func().Text; p != nil; p = p1 { 632 p1 = p.Link 633 o++ 634 if p.Mark&NOSCHED != 0 { 635 if q1 != p { 636 c.sched(q1, q) 637 } 638 for ; p != nil; p = p.Link { 639 if p.Mark&NOSCHED == 0 { 640 break 641 } 642 q = p 643 } 644 p1 = p 645 q1 = p 646 o = 0 647 continue 648 } 649 if p.Mark&(LABEL|SYNC) != 0 { 650 if q1 != p { 651 c.sched(q1, q) 652 } 653 q1 = p 654 o = 1 655 } 656 if p.Mark&(BRANCH|SYNC) != 0 { 657 c.sched(q1, p) 658 q1 = p1 659 o = 0 660 } 661 if o >= NSCHED { 662 c.sched(q1, p) 663 q1 = p1 664 o = 0 665 } 666 q = p 667 } 668 } 669 670 func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { 671 var mov, add obj.As 672 673 if c.ctxt.Arch.Family == sys.MIPS64 { 674 add = AADDV 675 mov = AMOVV 676 } else { 677 add = AADDU 678 mov = AMOVW 679 } 680 681 if c.ctxt.Flag_maymorestack != "" { 682 // Save LR and REGCTXT. 
683 frameSize := 2 * c.ctxt.Arch.PtrSize 684 685 p = c.ctxt.StartUnsafePoint(p, c.newprog) 686 687 // MOV REGLINK, -8/-16(SP) 688 p = obj.Appendp(p, c.newprog) 689 p.As = mov 690 p.From.Type = obj.TYPE_REG 691 p.From.Reg = REGLINK 692 p.To.Type = obj.TYPE_MEM 693 p.To.Offset = int64(-frameSize) 694 p.To.Reg = REGSP 695 696 // MOV REGCTXT, -4/-8(SP) 697 p = obj.Appendp(p, c.newprog) 698 p.As = mov 699 p.From.Type = obj.TYPE_REG 700 p.From.Reg = REGCTXT 701 p.To.Type = obj.TYPE_MEM 702 p.To.Offset = -int64(c.ctxt.Arch.PtrSize) 703 p.To.Reg = REGSP 704 705 // ADD $-8/$-16, SP 706 p = obj.Appendp(p, c.newprog) 707 p.As = add 708 p.From.Type = obj.TYPE_CONST 709 p.From.Offset = int64(-frameSize) 710 p.To.Type = obj.TYPE_REG 711 p.To.Reg = REGSP 712 p.Spadj = int32(frameSize) 713 714 // JAL maymorestack 715 p = obj.Appendp(p, c.newprog) 716 p.As = AJAL 717 p.To.Type = obj.TYPE_BRANCH 718 // See ../x86/obj6.go 719 p.To.Sym = c.ctxt.LookupABI(c.ctxt.Flag_maymorestack, c.cursym.ABI()) 720 p.Mark |= BRANCH 721 722 // Restore LR and REGCTXT. 723 724 // MOV 0(SP), REGLINK 725 p = obj.Appendp(p, c.newprog) 726 p.As = mov 727 p.From.Type = obj.TYPE_MEM 728 p.From.Offset = 0 729 p.From.Reg = REGSP 730 p.To.Type = obj.TYPE_REG 731 p.To.Reg = REGLINK 732 733 // MOV 4/8(SP), REGCTXT 734 p = obj.Appendp(p, c.newprog) 735 p.As = mov 736 p.From.Type = obj.TYPE_MEM 737 p.From.Offset = int64(c.ctxt.Arch.PtrSize) 738 p.From.Reg = REGSP 739 p.To.Type = obj.TYPE_REG 740 p.To.Reg = REGCTXT 741 742 // ADD $8/$16, SP 743 p = obj.Appendp(p, c.newprog) 744 p.As = add 745 p.From.Type = obj.TYPE_CONST 746 p.From.Offset = int64(frameSize) 747 p.To.Type = obj.TYPE_REG 748 p.To.Reg = REGSP 749 p.Spadj = int32(-frameSize) 750 751 p = c.ctxt.EndUnsafePoint(p, c.newprog, -1) 752 } 753 754 // Jump back to here after morestack returns. 
755 startPred := p 756 757 // MOV g_stackguard(g), R1 758 p = obj.Appendp(p, c.newprog) 759 760 p.As = mov 761 p.From.Type = obj.TYPE_MEM 762 p.From.Reg = REGG 763 p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0 764 if c.cursym.CFunc() { 765 p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1 766 } 767 p.To.Type = obj.TYPE_REG 768 p.To.Reg = REG_R1 769 770 // Mark the stack bound check and morestack call async nonpreemptible. 771 // If we get preempted here, when resumed the preemption request is 772 // cleared, but we'll still call morestack, which will double the stack 773 // unnecessarily. See issue #35470. 774 p = c.ctxt.StartUnsafePoint(p, c.newprog) 775 776 var q *obj.Prog 777 if framesize <= abi.StackSmall { 778 // small stack: SP < stackguard 779 // AGTU SP, stackguard, R1 780 p = obj.Appendp(p, c.newprog) 781 782 p.As = ASGTU 783 p.From.Type = obj.TYPE_REG 784 p.From.Reg = REGSP 785 p.Reg = REG_R1 786 p.To.Type = obj.TYPE_REG 787 p.To.Reg = REG_R1 788 } else { 789 // large stack: SP-framesize < stackguard-StackSmall 790 offset := int64(framesize) - abi.StackSmall 791 if framesize > abi.StackBig { 792 // Such a large stack we need to protect against underflow. 793 // The runtime guarantees SP > objabi.StackBig, but 794 // framesize is large enough that SP-framesize may 795 // underflow, causing a direct comparison with the 796 // stack guard to incorrectly succeed. We explicitly 797 // guard against underflow. 798 // 799 // SGTU $(framesize-StackSmall), SP, R2 800 // BNE R2, label-of-call-to-morestack 801 802 p = obj.Appendp(p, c.newprog) 803 p.As = ASGTU 804 p.From.Type = obj.TYPE_CONST 805 p.From.Offset = offset 806 p.Reg = REGSP 807 p.To.Type = obj.TYPE_REG 808 p.To.Reg = REG_R2 809 810 p = obj.Appendp(p, c.newprog) 811 q = p 812 p.As = ABNE 813 p.From.Type = obj.TYPE_REG 814 p.From.Reg = REG_R2 815 p.To.Type = obj.TYPE_BRANCH 816 p.Mark |= BRANCH 817 } 818 819 // Check against the stack guard. 
We've ensured this won't underflow. 820 // ADD $-(framesize-StackSmall), SP, R2 821 // SGTU R2, stackguard, R1 822 p = obj.Appendp(p, c.newprog) 823 824 p.As = add 825 p.From.Type = obj.TYPE_CONST 826 p.From.Offset = -offset 827 p.Reg = REGSP 828 p.To.Type = obj.TYPE_REG 829 p.To.Reg = REG_R2 830 831 p = obj.Appendp(p, c.newprog) 832 p.As = ASGTU 833 p.From.Type = obj.TYPE_REG 834 p.From.Reg = REG_R2 835 p.Reg = REG_R1 836 p.To.Type = obj.TYPE_REG 837 p.To.Reg = REG_R1 838 } 839 840 // q1: BNE R1, done 841 p = obj.Appendp(p, c.newprog) 842 q1 := p 843 844 p.As = ABNE 845 p.From.Type = obj.TYPE_REG 846 p.From.Reg = REG_R1 847 p.To.Type = obj.TYPE_BRANCH 848 p.Mark |= BRANCH 849 850 // MOV LINK, R3 851 p = obj.Appendp(p, c.newprog) 852 853 p.As = mov 854 p.From.Type = obj.TYPE_REG 855 p.From.Reg = REGLINK 856 p.To.Type = obj.TYPE_REG 857 p.To.Reg = REG_R3 858 if q != nil { 859 q.To.SetTarget(p) 860 p.Mark |= LABEL 861 } 862 863 p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog) 864 865 // JAL runtime.morestack(SB) 866 p = obj.Appendp(p, c.newprog) 867 868 p.As = AJAL 869 p.To.Type = obj.TYPE_BRANCH 870 if c.cursym.CFunc() { 871 p.To.Sym = c.ctxt.Lookup("runtime.morestackc") 872 } else if !c.cursym.Func().Text.From.Sym.NeedCtxt() { 873 p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt") 874 } else { 875 p.To.Sym = c.ctxt.Lookup("runtime.morestack") 876 } 877 p.Mark |= BRANCH 878 879 p = c.ctxt.EndUnsafePoint(p, c.newprog, -1) 880 881 // JMP start 882 p = obj.Appendp(p, c.newprog) 883 884 p.As = AJMP 885 p.To.Type = obj.TYPE_BRANCH 886 p.To.SetTarget(startPred.Link) 887 startPred.Link.Mark |= LABEL 888 p.Mark |= BRANCH 889 890 // placeholder for q1's jump target 891 p = obj.Appendp(p, c.newprog) 892 893 p.As = obj.ANOP // zero-width place holder 894 q1.To.SetTarget(p) 895 896 return p 897 } 898 899 func (c *ctxt0) addnop(p *obj.Prog) { 900 q := c.newprog() 901 q.As = ANOOP 902 q.Pos = p.Pos 903 q.Link = p.Link 904 p.Link = q 905 } 906 907 const ( 908 E_HILO = 1 << 0 
909 E_FCR = 1 << 1 910 E_MCR = 1 << 2 911 E_MEM = 1 << 3 912 E_MEMSP = 1 << 4 /* uses offset and size */ 913 E_MEMSB = 1 << 5 /* uses offset and size */ 914 ANYMEM = E_MEM | E_MEMSP | E_MEMSB 915 //DELAY = LOAD|BRANCH|FCMP 916 DELAY = BRANCH /* only schedule branch */ 917 ) 918 919 type Dep struct { 920 ireg uint32 921 freg uint32 922 cc uint32 923 } 924 925 type Sch struct { 926 p obj.Prog 927 set Dep 928 used Dep 929 soffset int32 930 size uint8 931 nop uint8 932 comp bool 933 } 934 935 func (c *ctxt0) sched(p0, pe *obj.Prog) { 936 var sch [NSCHED]Sch 937 938 /* 939 * build side structure 940 */ 941 s := sch[:] 942 for p := p0; ; p = p.Link { 943 s[0].p = *p 944 c.markregused(&s[0]) 945 if p == pe { 946 break 947 } 948 s = s[1:] 949 } 950 se := s 951 952 for i := cap(sch) - cap(se); i >= 0; i-- { 953 s = sch[i:] 954 if s[0].p.Mark&DELAY == 0 { 955 continue 956 } 957 if -cap(s) < -cap(se) { 958 if !conflict(&s[0], &s[1]) { 959 continue 960 } 961 } 962 963 var t []Sch 964 var j int 965 for j = cap(sch) - cap(s) - 1; j >= 0; j-- { 966 t = sch[j:] 967 if t[0].comp { 968 if s[0].p.Mark&BRANCH != 0 { 969 continue 970 } 971 } 972 if t[0].p.Mark&DELAY != 0 { 973 if -cap(s) >= -cap(se) || conflict(&t[0], &s[1]) { 974 continue 975 } 976 } 977 for u := t[1:]; -cap(u) <= -cap(s); u = u[1:] { 978 if c.depend(&u[0], &t[0]) { 979 continue 980 } 981 } 982 goto out2 983 } 984 985 if s[0].p.Mark&BRANCH != 0 { 986 s[0].nop = 1 987 } 988 continue 989 990 out2: 991 // t[0] is the instruction being moved to fill the delay 992 stmp := t[0] 993 copy(t[:i-j], t[1:i-j+1]) 994 s[0] = stmp 995 996 if t[i-j-1].p.Mark&BRANCH != 0 { 997 // t[i-j] is being put into a branch delay slot 998 // combine its Spadj with the branch instruction 999 t[i-j-1].p.Spadj += t[i-j].p.Spadj 1000 t[i-j].p.Spadj = 0 1001 } 1002 1003 i-- 1004 } 1005 1006 /* 1007 * put it all back 1008 */ 1009 var p *obj.Prog 1010 var q *obj.Prog 1011 for s, p = sch[:], p0; -cap(s) <= -cap(se); s, p = s[1:], q { 1012 q = p.Link 
1013 if q != s[0].p.Link { 1014 *p = s[0].p 1015 p.Link = q 1016 } 1017 for s[0].nop != 0 { 1018 s[0].nop-- 1019 c.addnop(p) 1020 } 1021 } 1022 } 1023 1024 func (c *ctxt0) markregused(s *Sch) { 1025 p := &s.p 1026 s.comp = c.compound(p) 1027 s.nop = 0 1028 if s.comp { 1029 s.set.ireg |= 1 << (REGTMP - REG_R0) 1030 s.used.ireg |= 1 << (REGTMP - REG_R0) 1031 } 1032 1033 ar := 0 /* dest is really reference */ 1034 ad := 0 /* source/dest is really address */ 1035 ld := 0 /* opcode is load instruction */ 1036 sz := 20 /* size of load/store for overlap computation */ 1037 1038 /* 1039 * flags based on opcode 1040 */ 1041 switch p.As { 1042 case obj.ATEXT: 1043 c.autosize = int32(p.To.Offset + 8) 1044 ad = 1 1045 1046 case AJAL: 1047 r := p.Reg 1048 if r == 0 { 1049 r = REGLINK 1050 } 1051 s.set.ireg |= 1 << uint(r-REG_R0) 1052 ar = 1 1053 ad = 1 1054 1055 case ABGEZAL, 1056 ABLTZAL: 1057 s.set.ireg |= 1 << (REGLINK - REG_R0) 1058 fallthrough 1059 case ABEQ, 1060 ABGEZ, 1061 ABGTZ, 1062 ABLEZ, 1063 ABLTZ, 1064 ABNE: 1065 ar = 1 1066 ad = 1 1067 1068 case ABFPT, 1069 ABFPF: 1070 ad = 1 1071 s.used.cc |= E_FCR 1072 1073 case ACMPEQD, 1074 ACMPEQF, 1075 ACMPGED, 1076 ACMPGEF, 1077 ACMPGTD, 1078 ACMPGTF: 1079 ar = 1 1080 s.set.cc |= E_FCR 1081 p.Mark |= FCMP 1082 1083 case AJMP: 1084 ar = 1 1085 ad = 1 1086 1087 case AMOVB, 1088 AMOVBU: 1089 sz = 1 1090 ld = 1 1091 1092 case AMOVH, 1093 AMOVHU: 1094 sz = 2 1095 ld = 1 1096 1097 case AMOVF, 1098 AMOVW, 1099 AMOVWL, 1100 AMOVWR: 1101 sz = 4 1102 ld = 1 1103 1104 case AMOVD, 1105 AMOVV, 1106 AMOVVL, 1107 AMOVVR: 1108 sz = 8 1109 ld = 1 1110 1111 case ADIV, 1112 ADIVU, 1113 AMUL, 1114 AMULU, 1115 AREM, 1116 AREMU, 1117 ADIVV, 1118 ADIVVU, 1119 AMULV, 1120 AMULVU, 1121 AREMV, 1122 AREMVU: 1123 s.set.cc = E_HILO 1124 fallthrough 1125 case AADD, 1126 AADDU, 1127 AADDV, 1128 AADDVU, 1129 AAND, 1130 ANOR, 1131 AOR, 1132 ASGT, 1133 ASGTU, 1134 ASLL, 1135 ASRA, 1136 ASRL, 1137 ASLLV, 1138 ASRAV, 1139 ASRLV, 1140 ASUB, 1141 ASUBU, 1142 
ASUBV, 1143 ASUBVU, 1144 AXOR, 1145 1146 AADDD, 1147 AADDF, 1148 AADDW, 1149 ASUBD, 1150 ASUBF, 1151 ASUBW, 1152 AMULF, 1153 AMULD, 1154 AMULW, 1155 ADIVF, 1156 ADIVD, 1157 ADIVW: 1158 if p.Reg == 0 { 1159 if p.To.Type == obj.TYPE_REG { 1160 p.Reg = p.To.Reg 1161 } 1162 //if(p->reg == NREG) 1163 // print("botch %P\n", p); 1164 } 1165 } 1166 1167 /* 1168 * flags based on 'to' field 1169 */ 1170 cls := int(p.To.Class) 1171 if cls == 0 { 1172 cls = c.aclass(&p.To) + 1 1173 p.To.Class = int8(cls) 1174 } 1175 cls-- 1176 switch cls { 1177 default: 1178 fmt.Printf("unknown class %d %v\n", cls, p) 1179 1180 case C_ZCON, 1181 C_SCON, 1182 C_ADD0CON, 1183 C_AND0CON, 1184 C_ADDCON, 1185 C_ANDCON, 1186 C_UCON, 1187 C_LCON, 1188 C_NONE, 1189 C_SBRA, 1190 C_LBRA, 1191 C_ADDR, 1192 C_TEXTSIZE: 1193 break 1194 1195 case C_HI, 1196 C_LO: 1197 s.set.cc |= E_HILO 1198 1199 case C_FCREG: 1200 s.set.cc |= E_FCR 1201 1202 case C_MREG: 1203 s.set.cc |= E_MCR 1204 1205 case C_ZOREG, 1206 C_SOREG, 1207 C_LOREG: 1208 cls = int(p.To.Reg) 1209 s.used.ireg |= 1 << uint(cls-REG_R0) 1210 if ad != 0 { 1211 break 1212 } 1213 s.size = uint8(sz) 1214 s.soffset = c.regoff(&p.To) 1215 1216 m := uint32(ANYMEM) 1217 if cls == REGSB { 1218 m = E_MEMSB 1219 } 1220 if cls == REGSP { 1221 m = E_MEMSP 1222 } 1223 1224 if ar != 0 { 1225 s.used.cc |= m 1226 } else { 1227 s.set.cc |= m 1228 } 1229 1230 case C_SACON, 1231 C_LACON: 1232 s.used.ireg |= 1 << (REGSP - REG_R0) 1233 1234 case C_SECON, 1235 C_LECON: 1236 s.used.ireg |= 1 << (REGSB - REG_R0) 1237 1238 case C_REG: 1239 if ar != 0 { 1240 s.used.ireg |= 1 << uint(p.To.Reg-REG_R0) 1241 } else { 1242 s.set.ireg |= 1 << uint(p.To.Reg-REG_R0) 1243 } 1244 1245 case C_FREG: 1246 if ar != 0 { 1247 s.used.freg |= 1 << uint(p.To.Reg-REG_F0) 1248 } else { 1249 s.set.freg |= 1 << uint(p.To.Reg-REG_F0) 1250 } 1251 if ld != 0 && p.From.Type == obj.TYPE_REG { 1252 p.Mark |= LOAD 1253 } 1254 1255 case C_SAUTO, 1256 C_LAUTO: 1257 s.used.ireg |= 1 << (REGSP - REG_R0) 1258 
if ad != 0 { 1259 break 1260 } 1261 s.size = uint8(sz) 1262 s.soffset = c.regoff(&p.To) 1263 1264 if ar != 0 { 1265 s.used.cc |= E_MEMSP 1266 } else { 1267 s.set.cc |= E_MEMSP 1268 } 1269 1270 case C_SEXT, 1271 C_LEXT: 1272 s.used.ireg |= 1 << (REGSB - REG_R0) 1273 if ad != 0 { 1274 break 1275 } 1276 s.size = uint8(sz) 1277 s.soffset = c.regoff(&p.To) 1278 1279 if ar != 0 { 1280 s.used.cc |= E_MEMSB 1281 } else { 1282 s.set.cc |= E_MEMSB 1283 } 1284 } 1285 1286 /* 1287 * flags based on 'from' field 1288 */ 1289 cls = int(p.From.Class) 1290 if cls == 0 { 1291 cls = c.aclass(&p.From) + 1 1292 p.From.Class = int8(cls) 1293 } 1294 cls-- 1295 switch cls { 1296 default: 1297 fmt.Printf("unknown class %d %v\n", cls, p) 1298 1299 case C_ZCON, 1300 C_SCON, 1301 C_ADD0CON, 1302 C_AND0CON, 1303 C_ADDCON, 1304 C_ANDCON, 1305 C_UCON, 1306 C_LCON, 1307 C_NONE, 1308 C_SBRA, 1309 C_LBRA, 1310 C_ADDR, 1311 C_TEXTSIZE: 1312 break 1313 1314 case C_HI, 1315 C_LO: 1316 s.used.cc |= E_HILO 1317 1318 case C_FCREG: 1319 s.used.cc |= E_FCR 1320 1321 case C_MREG: 1322 s.used.cc |= E_MCR 1323 1324 case C_ZOREG, 1325 C_SOREG, 1326 C_LOREG: 1327 cls = int(p.From.Reg) 1328 s.used.ireg |= 1 << uint(cls-REG_R0) 1329 if ld != 0 { 1330 p.Mark |= LOAD 1331 } 1332 s.size = uint8(sz) 1333 s.soffset = c.regoff(&p.From) 1334 1335 m := uint32(ANYMEM) 1336 if cls == REGSB { 1337 m = E_MEMSB 1338 } 1339 if cls == REGSP { 1340 m = E_MEMSP 1341 } 1342 1343 s.used.cc |= m 1344 1345 case C_SACON, 1346 C_LACON: 1347 cls = int(p.From.Reg) 1348 if cls == 0 { 1349 cls = REGSP 1350 } 1351 s.used.ireg |= 1 << uint(cls-REG_R0) 1352 1353 case C_SECON, 1354 C_LECON: 1355 s.used.ireg |= 1 << (REGSB - REG_R0) 1356 1357 case C_REG: 1358 s.used.ireg |= 1 << uint(p.From.Reg-REG_R0) 1359 1360 case C_FREG: 1361 s.used.freg |= 1 << uint(p.From.Reg-REG_F0) 1362 if ld != 0 && p.To.Type == obj.TYPE_REG { 1363 p.Mark |= LOAD 1364 } 1365 1366 case C_SAUTO, 1367 C_LAUTO: 1368 s.used.ireg |= 1 << (REGSP - REG_R0) 1369 if ld != 0 { 
1370 p.Mark |= LOAD 1371 } 1372 if ad != 0 { 1373 break 1374 } 1375 s.size = uint8(sz) 1376 s.soffset = c.regoff(&p.From) 1377 1378 s.used.cc |= E_MEMSP 1379 1380 case C_SEXT: 1381 case C_LEXT: 1382 s.used.ireg |= 1 << (REGSB - REG_R0) 1383 if ld != 0 { 1384 p.Mark |= LOAD 1385 } 1386 if ad != 0 { 1387 break 1388 } 1389 s.size = uint8(sz) 1390 s.soffset = c.regoff(&p.From) 1391 1392 s.used.cc |= E_MEMSB 1393 } 1394 1395 cls = int(p.Reg) 1396 if cls != 0 { 1397 if REG_F0 <= cls && cls <= REG_F31 { 1398 s.used.freg |= 1 << uint(cls-REG_F0) 1399 } else { 1400 s.used.ireg |= 1 << uint(cls-REG_R0) 1401 } 1402 } 1403 s.set.ireg &^= (1 << (REGZERO - REG_R0)) /* R0 can't be set */ 1404 } 1405 1406 /* 1407 * test to see if two instructions can be 1408 * interchanged without changing semantics 1409 */ 1410 func (c *ctxt0) depend(sa, sb *Sch) bool { 1411 if sa.set.ireg&(sb.set.ireg|sb.used.ireg) != 0 { 1412 return true 1413 } 1414 if sb.set.ireg&sa.used.ireg != 0 { 1415 return true 1416 } 1417 1418 if sa.set.freg&(sb.set.freg|sb.used.freg) != 0 { 1419 return true 1420 } 1421 if sb.set.freg&sa.used.freg != 0 { 1422 return true 1423 } 1424 1425 /* 1426 * special case. 1427 * loads from same address cannot pass. 1428 * this is for hardware fifo's and the like 1429 */ 1430 if sa.used.cc&sb.used.cc&E_MEM != 0 { 1431 if sa.p.Reg == sb.p.Reg { 1432 if c.regoff(&sa.p.From) == c.regoff(&sb.p.From) { 1433 return true 1434 } 1435 } 1436 } 1437 1438 x := (sa.set.cc & (sb.set.cc | sb.used.cc)) | (sb.set.cc & sa.used.cc) 1439 if x != 0 { 1440 /* 1441 * allow SB and SP to pass each other. 
1442 * allow SB to pass SB iff doffsets are ok 1443 * anything else conflicts 1444 */ 1445 if x != E_MEMSP && x != E_MEMSB { 1446 return true 1447 } 1448 x = sa.set.cc | sb.set.cc | sa.used.cc | sb.used.cc 1449 if x&E_MEM != 0 { 1450 return true 1451 } 1452 if offoverlap(sa, sb) { 1453 return true 1454 } 1455 } 1456 1457 return false 1458 } 1459 1460 func offoverlap(sa, sb *Sch) bool { 1461 if sa.soffset < sb.soffset { 1462 if sa.soffset+int32(sa.size) > sb.soffset { 1463 return true 1464 } 1465 return false 1466 } 1467 if sb.soffset+int32(sb.size) > sa.soffset { 1468 return true 1469 } 1470 return false 1471 } 1472 1473 /* 1474 * test 2 adjacent instructions 1475 * and find out if inserted instructions 1476 * are desired to prevent stalls. 1477 */ 1478 func conflict(sa, sb *Sch) bool { 1479 if sa.set.ireg&sb.used.ireg != 0 { 1480 return true 1481 } 1482 if sa.set.freg&sb.used.freg != 0 { 1483 return true 1484 } 1485 if sa.set.cc&sb.used.cc != 0 { 1486 return true 1487 } 1488 return false 1489 } 1490 1491 func (c *ctxt0) compound(p *obj.Prog) bool { 1492 o := c.oplook(p) 1493 if o.size != 4 { 1494 return true 1495 } 1496 if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSB { 1497 return true 1498 } 1499 return false 1500 } 1501 1502 var Linkmips64 = obj.LinkArch{ 1503 Arch: sys.ArchMIPS64, 1504 Init: buildop, 1505 Preprocess: preprocess, 1506 Assemble: span0, 1507 Progedit: progedit, 1508 DWARFRegisters: MIPSDWARFRegisters, 1509 } 1510 1511 var Linkmips64le = obj.LinkArch{ 1512 Arch: sys.ArchMIPS64LE, 1513 Init: buildop, 1514 Preprocess: preprocess, 1515 Assemble: span0, 1516 Progedit: progedit, 1517 DWARFRegisters: MIPSDWARFRegisters, 1518 } 1519 1520 var Linkmips = obj.LinkArch{ 1521 Arch: sys.ArchMIPS, 1522 Init: buildop, 1523 Preprocess: preprocess, 1524 Assemble: span0, 1525 Progedit: progedit, 1526 DWARFRegisters: MIPSDWARFRegisters, 1527 } 1528 1529 var Linkmipsle = obj.LinkArch{ 1530 Arch: sys.ArchMIPSLE, 1531 Init: buildop, 1532 Preprocess: preprocess, 1533 
Assemble: span0, 1534 Progedit: progedit, 1535 DWARFRegisters: MIPSDWARFRegisters, 1536 }