// Source: github.com/go-asm/go@v1.21.1-0.20240213172139-40c5ead50c48/cmd/obj/mips/obj0.go
//
// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova.
//
//	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
//	Portions Copyright © 1997-1999 Vita Nuova Limited
//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
//	Portions Copyright © 2004,2006 Bruce Ellis
//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
//	Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package mips

import (
	"encoding/binary"
	"fmt"
	"log"
	"math"

	"github.com/go-asm/go/abi"
	"github.com/go-asm/go/cmd/obj"
	"github.com/go-asm/go/cmd/sys"
)

// progedit performs machine-independent-to-MIPS rewrites on a single Prog
// as it is assembled: symbol-targeted jumps become TYPE_BRANCH, float
// constants are moved into memory symbols (zero constants become moves
// from REGZERO), 64-bit constants that don't fit in 32 bits are placed in
// memory, and SUB-with-constant is folded into ADD of the negated constant.
func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
	c := ctxt0{ctxt: ctxt, newprog: newprog}

	// Classes are recomputed lazily by markregused/aclass; reset them here.
	p.From.Class = 0
	p.To.Class = 0

	// Rewrite JMP/JAL to symbol as TYPE_BRANCH.
	switch p.As {
	case AJMP,
		AJAL,
		ARET,
		obj.ADUFFZERO,
		obj.ADUFFCOPY:
		if p.To.Sym != nil {
			p.To.Type = obj.TYPE_BRANCH
		}
	}

	// Rewrite float constants to values stored in memory.
	switch p.As {
	case AMOVF:
		if p.From.Type == obj.TYPE_FCONST {
			f32 := float32(p.From.Val.(float64))
			if math.Float32bits(f32) == 0 {
				// +0.0 has an all-zero bit pattern: load from the zero register.
				p.As = AMOVW
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REGZERO
				break
			}
			p.From.Type = obj.TYPE_MEM
			p.From.Sym = ctxt.Float32Sym(f32)
			p.From.Name = obj.NAME_EXTERN
			p.From.Offset = 0
		}

	case AMOVD:
		if p.From.Type == obj.TYPE_FCONST {
			f64 := p.From.Val.(float64)
			// Only 64-bit MIPS can transfer a full 64-bit zero from REGZERO.
			if math.Float64bits(f64) == 0 && c.ctxt.Arch.Family == sys.MIPS64 {
				p.As = AMOVV
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REGZERO
				break
			}
			p.From.Type = obj.TYPE_MEM
			p.From.Sym = ctxt.Float64Sym(f64)
			p.From.Name = obj.NAME_EXTERN
			p.From.Offset = 0
		}

	// Put >32-bit constants in memory and load them
	case AMOVV:
		if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset {
			p.From.Type = obj.TYPE_MEM
			p.From.Sym = ctxt.Int64Sym(p.From.Offset)
			p.From.Name = obj.NAME_EXTERN
			p.From.Offset = 0
		}
	}

	// Rewrite SUB constants into ADD.
	switch p.As {
	case ASUB:
		if p.From.Type == obj.TYPE_CONST {
			p.From.Offset = -p.From.Offset
			p.As = AADD
		}

	case ASUBU:
		if p.From.Type == obj.TYPE_CONST {
			p.From.Offset = -p.From.Offset
			p.As = AADDU
		}

	case ASUBV:
		if p.From.Type == obj.TYPE_CONST {
			p.From.Offset = -p.From.Offset
			p.As = AADDV
		}

	case ASUBVU:
		if p.From.Type == obj.TYPE_CONST {
			p.From.Offset = -p.From.Offset
			p.As = AADDVU
		}
	}
}

// preprocess runs the whole-function pass over cursym: it computes frame
// size, marks leaf functions and branch/label/sync instructions, expands
// RET into the epilogue sequence, inserts the prologue (LR save, SP
// adjust, stack-split check, panic-argp fixup for wrappers), splits MOVD
// into two MOVF on 32-bit MIPS, and finally either inserts NOPs after
// branches (nosched) or runs the delay-slot scheduler.
func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
	// TODO(minux): add morestack short-cuts with small fixed frame-size.
	c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym}

	// a switch for enabling/disabling instruction scheduling
	nosched := true

	if c.cursym.Func().Text == nil || c.cursym.Func().Text.Link == nil {
		return
	}

	p := c.cursym.Func().Text
	textstksiz := p.To.Offset
	if textstksiz == -ctxt.Arch.FixedFrameSize {
		// Historical way to mark NOFRAME.
		p.From.Sym.Set(obj.AttrNoFrame, true)
		textstksiz = 0
	}
	if textstksiz < 0 {
		c.ctxt.Diag("negative frame size %d - did you mean NOFRAME?", textstksiz)
	}
	if p.From.Sym.NoFrame() {
		if textstksiz != 0 {
			c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
		}
	}

	c.cursym.Func().Args = p.To.Val.(int32)
	c.cursym.Func().Locals = int32(textstksiz)

	/*
	 * find leaf subroutines
	 * expand RET
	 * expand BECOME pseudo
	 */

	for p := c.cursym.Func().Text; p != nil; p = p.Link {
		switch p.As {
		/* too hard, just leave alone */
		case obj.ATEXT:
			p.Mark |= LABEL | LEAF | SYNC
			if p.Link != nil {
				p.Link.Mark |= LABEL
			}

		/* too hard, just leave alone */
		case AMOVW,
			AMOVV:
			// Moves to/from special registers must not be reordered.
			if p.To.Type == obj.TYPE_REG && p.To.Reg >= REG_SPECIAL {
				p.Mark |= LABEL | SYNC
				break
			}
			if p.From.Type == obj.TYPE_REG && p.From.Reg >= REG_SPECIAL {
				p.Mark |= LABEL | SYNC
			}

		/* too hard, just leave alone */
		case ASYSCALL,
			AWORD,
			ATLBWR,
			ATLBWI,
			ATLBP,
			ATLBR:
			p.Mark |= LABEL | SYNC

		case ANOR:
			if p.To.Type == obj.TYPE_REG {
				if p.To.Reg == REGZERO {
					p.Mark |= LABEL | SYNC
				}
			}

		case ABGEZAL,
			ABLTZAL,
			AJAL,
			obj.ADUFFZERO,
			obj.ADUFFCOPY:
			// Any call makes the function non-leaf.
			c.cursym.Func().Text.Mark &^= LEAF
			fallthrough

		case AJMP,
			ABEQ,
			ABGEZ,
			ABGTZ,
			ABLEZ,
			ABLTZ,
			ABNE,
			ABFPT, ABFPF:
			if p.As == ABFPT || p.As == ABFPF {
				// We don't treat ABFPT and ABFPF as branches here,
				// so that we will always fill nop (0x0) in their
				// delay slot during assembly.
				// This is to workaround a kernel FPU emulator bug
				// where it uses the user stack to simulate the
				// instruction in the delay slot if it's not 0x0,
				// and somehow that leads to SIGSEGV when the kernel
				// jump to the stack.
				p.Mark |= SYNC
			} else {
				p.Mark |= BRANCH
			}
			q1 := p.To.Target()
			if q1 != nil {
				// Skip over NOPs so the branch lands on a real instruction.
				for q1.As == obj.ANOP {
					q1 = q1.Link
					p.To.SetTarget(q1)
				}

				if q1.Mark&LEAF == 0 {
					q1.Mark |= LABEL
				}
			}
			//else {
			//	p.Mark |= LABEL
			//}
			q1 = p.Link
			if q1 != nil {
				q1.Mark |= LABEL
			}

		case ARET:
			if p.Link != nil {
				p.Link.Mark |= LABEL
			}
		}
	}

	// Pointer-width move/add opcodes for this architecture family.
	var mov, add obj.As
	if c.ctxt.Arch.Family == sys.MIPS64 {
		add = AADDV
		mov = AMOVV
	} else {
		add = AADDU
		mov = AMOVW
	}

	var q *obj.Prog
	var q1 *obj.Prog
	autosize := int32(0)
	var p1 *obj.Prog
	var p2 *obj.Prog
	for p := c.cursym.Func().Text; p != nil; p = p.Link {
		o := p.As
		switch o {
		case obj.ATEXT:
			autosize = int32(textstksiz)

			if p.Mark&LEAF != 0 && autosize == 0 {
				// A leaf function with no locals has no frame.
				p.From.Sym.Set(obj.AttrNoFrame, true)
			}

			if !p.From.Sym.NoFrame() {
				// If there is a stack frame at all, it includes
				// space to save the LR.
				autosize += int32(c.ctxt.Arch.FixedFrameSize)
			}

			// Keep the frame pointer-size aligned on MIPS64.
			if autosize&4 != 0 && c.ctxt.Arch.Family == sys.MIPS64 {
				autosize += 4
			}

			if autosize == 0 && c.cursym.Func().Text.Mark&LEAF == 0 {
				if c.cursym.Func().Text.From.Sym.NoSplit() {
					if ctxt.Debugvlog {
						ctxt.Logf("save suppressed in: %s\n", c.cursym.Name)
					}

					c.cursym.Func().Text.Mark |= LEAF
				}
			}

			p.To.Offset = int64(autosize) - ctxt.Arch.FixedFrameSize

			if c.cursym.Func().Text.Mark&LEAF != 0 {
				c.cursym.Set(obj.AttrLeaf, true)
				if p.From.Sym.NoFrame() {
					break
				}
			}

			if !p.From.Sym.NoSplit() {
				p = c.stacksplit(p, autosize) // emit split check
			}

			q = p

			if autosize != 0 {
				// Make sure to save link register for non-empty frame, even if
				// it is a leaf function, so that traceback works.
				// Store link register before decrement SP, so if a signal comes
				// during the execution of the function prologue, the traceback
				// code will not see a half-updated stack frame.
				// This sequence is not async preemptible, as if we open a frame
				// at the current SP, it will clobber the saved LR.
				q = c.ctxt.StartUnsafePoint(q, c.newprog)

				// MOV REGLINK, -autosize(SP)
				q = obj.Appendp(q, newprog)
				q.As = mov
				q.Pos = p.Pos
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REGLINK
				q.To.Type = obj.TYPE_MEM
				q.To.Offset = int64(-autosize)
				q.To.Reg = REGSP

				// ADD $-autosize, SP
				q = obj.Appendp(q, newprog)
				q.As = add
				q.Pos = p.Pos
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = int64(-autosize)
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REGSP
				q.Spadj = +autosize

				q = c.ctxt.EndUnsafePoint(q, c.newprog, -1)

				// On Linux, in a cgo binary we may get a SIGSETXID signal early on
				// before the signal stack is set, as glibc doesn't allow us to block
				// SIGSETXID. So a signal may land on the current stack and clobber
				// the content below the SP. We store the LR again after the SP is
				// decremented.
				q = obj.Appendp(q, newprog)
				q.As = mov
				q.Pos = p.Pos
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REGLINK
				q.To.Type = obj.TYPE_MEM
				q.To.Offset = 0
				q.To.Reg = REGSP
			}

			if c.cursym.Func().Text.From.Sym.Wrapper() && c.cursym.Func().Text.Mark&LEAF == 0 {
				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
				//
				//	MOV	g_panic(g), R1
				//	BEQ	R1, end
				//	MOV	panic_argp(R1), R2
				//	ADD	$(autosize+FIXED_FRAME), R29, R3
				//	BNE	R2, R3, end
				//	ADD	$FIXED_FRAME, R29, R2
				//	MOV	R2, panic_argp(R1)
				// end:
				//	NOP
				//
				// The NOP is needed to give the jumps somewhere to land.
				// It is a liblink NOP, not an mips NOP: it encodes to 0 instruction bytes.
				//
				// We don't generate this for leafs because that means the wrapped
				// function was inlined into the wrapper.

				q = obj.Appendp(q, newprog)

				q.As = mov
				q.From.Type = obj.TYPE_MEM
				q.From.Reg = REGG
				q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R1

				q = obj.Appendp(q, newprog)
				q.As = ABEQ
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REG_R1
				q.To.Type = obj.TYPE_BRANCH
				q.Mark |= BRANCH
				p1 = q // target patched to the final NOP below

				q = obj.Appendp(q, newprog)
				q.As = mov
				q.From.Type = obj.TYPE_MEM
				q.From.Reg = REG_R1
				q.From.Offset = 0 // Panic.argp
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R2

				q = obj.Appendp(q, newprog)
				q.As = add
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = int64(autosize) + ctxt.Arch.FixedFrameSize
				q.Reg = REGSP
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R3

				q = obj.Appendp(q, newprog)
				q.As = ABNE
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REG_R2
				q.Reg = REG_R3
				q.To.Type = obj.TYPE_BRANCH
				q.Mark |= BRANCH
				p2 = q // target patched to the final NOP below

				q = obj.Appendp(q, newprog)
				q.As = add
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = ctxt.Arch.FixedFrameSize
				q.Reg = REGSP
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R2

				q = obj.Appendp(q, newprog)
				q.As = mov
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REG_R2
				q.To.Type = obj.TYPE_MEM
				q.To.Reg = REG_R1
				q.To.Offset = 0 // Panic.argp

				q = obj.Appendp(q, newprog)

				q.As = obj.ANOP
				p1.To.SetTarget(q)
				p2.To.SetTarget(q)
			}

		case ARET:
			if p.From.Type == obj.TYPE_CONST {
				ctxt.Diag("using BECOME (%v) is not supported!", p)
				break
			}

			retSym := p.To.Sym
			p.To.Name = obj.NAME_NONE // clear fields as we may modify p to other instruction
			p.To.Sym = nil

			if c.cursym.Func().Text.Mark&LEAF != 0 {
				if autosize == 0 {
					// Frameless leaf: RET is just a jump.
					p.As = AJMP
					p.From = obj.Addr{}
					if retSym != nil { // retjmp
						p.To.Type = obj.TYPE_BRANCH
						p.To.Name = obj.NAME_EXTERN
						p.To.Sym = retSym
					} else {
						p.To.Type = obj.TYPE_MEM
						p.To.Reg = REGLINK
						p.To.Offset = 0
					}
					p.Mark |= BRANCH
					break
				}

				// Leaf with frame: pop the frame, then jump.
				p.As = add
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = int64(autosize)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REGSP
				p.Spadj = -autosize

				q = c.newprog()
				q.As = AJMP
				q.Pos = p.Pos
				if retSym != nil { // retjmp
					q.To.Type = obj.TYPE_BRANCH
					q.To.Name = obj.NAME_EXTERN
					q.To.Sym = retSym
				} else {
					q.To.Type = obj.TYPE_MEM
					q.To.Reg = REGLINK
					q.To.Offset = 0
				}
				q.Mark |= BRANCH
				q.Spadj = +autosize

				q.Link = p.Link
				p.Link = q
				break
			}

			// Non-leaf: reload saved LR from 0(SP).
			p.As = mov
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = 0
			p.From.Reg = REGSP
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REGLINK

			if autosize != 0 {
				q = c.newprog()
				q.As = add
				q.Pos = p.Pos
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = int64(autosize)
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REGSP
				q.Spadj = -autosize

				q.Link = p.Link
				p.Link = q
			}

			// NOTE(review): the q1 insertion below links after q, which is
			// only fresh when autosize != 0; non-leaf frames always include
			// the LR save (FixedFrameSize), so autosize should be non-zero
			// here — confirm against upstream.
			q1 = c.newprog()
			q1.As = AJMP
			q1.Pos = p.Pos
			if retSym != nil { // retjmp
				q1.To.Type = obj.TYPE_BRANCH
				q1.To.Name = obj.NAME_EXTERN
				q1.To.Sym = retSym
			} else {
				q1.To.Type = obj.TYPE_MEM
				q1.To.Offset = 0
				q1.To.Reg = REGLINK
			}
			q1.Mark |= BRANCH
			q1.Spadj = +autosize

			q1.Link = q.Link
			q.Link = q1

		case AADD,
			AADDU,
			AADDV,
			AADDVU:
			// Track explicit SP adjustments for unwinding.
			if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
				p.Spadj = int32(-p.From.Offset)
			}

		case obj.AGETCALLERPC:
			if cursym.Leaf() {
				/* MOV LR, Rd */
				p.As = mov
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REGLINK
			} else {
				/* MOV (RSP), Rd */
				p.As = mov
				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REGSP
			}
		}

		// Any write to SP without a matching Spadj marks the function
		// SPWRITE (traceback cannot unwind through it).
		if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 {
			f := c.cursym.Func()
			if f.FuncFlag&abi.FuncFlagSPWrite == 0 {
				c.cursym.Func().FuncFlag |= abi.FuncFlagSPWrite
				if ctxt.Debugvlog || !ctxt.IsAsm {
					ctxt.Logf("auto-SPWRITE: %s %v\n", c.cursym.Name, p)
					if !ctxt.IsAsm {
						ctxt.Diag("invalid auto-SPWRITE in non-assembly")
						ctxt.DiagFlush()
						log.Fatalf("bad SPWRITE")
					}
				}
			}
		}
	}

	if c.ctxt.Arch.Family == sys.MIPS {
		// rewrite MOVD into two MOVF in 32-bit mode to avoid unaligned memory access
		for p = c.cursym.Func().Text; p != nil; p = p1 {
			p1 = p.Link

			if p.As != AMOVD {
				continue
			}
			if p.From.Type != obj.TYPE_MEM && p.To.Type != obj.TYPE_MEM {
				continue
			}

			p.As = AMOVF
			q = c.newprog()
			*q = *p
			q.Link = p.Link
			p.Link = q
			p1 = q.Link

			var addrOff int64
			if c.ctxt.Arch.ByteOrder == binary.BigEndian {
				addrOff = 4 // swap load/save order
			}
			if p.From.Type == obj.TYPE_MEM {
				// Load into the even/odd FP register pair.
				reg := REG_F0 + (p.To.Reg-REG_F0)&^1
				p.To.Reg = reg
				q.To.Reg = reg + 1
				p.From.Offset += addrOff
				q.From.Offset += 4 - addrOff
			} else if p.To.Type == obj.TYPE_MEM {
				reg := REG_F0 + (p.From.Reg-REG_F0)&^1
				p.From.Reg = reg
				q.From.Reg = reg + 1
				p.To.Offset += addrOff
				q.To.Offset += 4 - addrOff
			}
		}
	}

	if nosched {
		// if we don't do instruction scheduling, simply add
		// NOP after each branch instruction.
		for p = c.cursym.Func().Text; p != nil; p = p.Link {
			if p.Mark&BRANCH != 0 {
				c.addnop(p)
			}
		}
		return
	}

	// instruction scheduling
	q = nil                          // p - 1
	q1 = c.cursym.Func().Text        // top of block
	o := 0                           // count of instructions
	for p = c.cursym.Func().Text; p != nil; p = p1 {
		p1 = p.Link
		o++
		if p.Mark&NOSCHED != 0 {
			// Flush the current block, then skip the NOSCHED run untouched.
			if q1 != p {
				c.sched(q1, q)
			}
			for ; p != nil; p = p.Link {
				if p.Mark&NOSCHED == 0 {
					break
				}
				q = p
			}
			p1 = p
			q1 = p
			o = 0
			continue
		}
		if p.Mark&(LABEL|SYNC) != 0 {
			// Block boundary before p.
			if q1 != p {
				c.sched(q1, q)
			}
			q1 = p
			o = 1
		}
		if p.Mark&(BRANCH|SYNC) != 0 {
			// Block boundary after p.
			c.sched(q1, p)
			q1 = p1
			o = 0
		}
		if o >= NSCHED {
			c.sched(q1, p)
			q1 = p1
			o = 0
		}
		q = p
	}
}

// stacksplit inserts the stack-overflow check after the TEXT Prog p and
// returns the last Prog inserted. It optionally calls the maymorestack
// hook first, compares SP against g's stackguard (with underflow
// protection for very large frames), and on failure saves LR in R3 and
// calls the appropriate runtime.morestack variant, then jumps back to
// re-run the check.
func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
	var mov, add obj.As

	if c.ctxt.Arch.Family == sys.MIPS64 {
		add = AADDV
		mov = AMOVV
	} else {
		add = AADDU
		mov = AMOVW
	}

	if c.ctxt.Flag_maymorestack != "" {
		// Save LR and REGCTXT.
		frameSize := 2 * c.ctxt.Arch.PtrSize

		p = c.ctxt.StartUnsafePoint(p, c.newprog)

		// MOV	REGLINK, -8/-16(SP)
		p = obj.Appendp(p, c.newprog)
		p.As = mov
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REGLINK
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(-frameSize)
		p.To.Reg = REGSP

		// MOV	REGCTXT, -4/-8(SP)
		p = obj.Appendp(p, c.newprog)
		p.As = mov
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REGCTXT
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = -int64(c.ctxt.Arch.PtrSize)
		p.To.Reg = REGSP

		// ADD	$-8/$-16, SP
		p = obj.Appendp(p, c.newprog)
		p.As = add
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-frameSize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGSP
		p.Spadj = int32(frameSize)

		// JAL	maymorestack
		p = obj.Appendp(p, c.newprog)
		p.As = AJAL
		p.To.Type = obj.TYPE_BRANCH
		// See ../x86/obj6.go
		p.To.Sym = c.ctxt.LookupABI(c.ctxt.Flag_maymorestack, c.cursym.ABI())
		p.Mark |= BRANCH

		// Restore LR and REGCTXT.

		// MOV	0(SP), REGLINK
		p = obj.Appendp(p, c.newprog)
		p.As = mov
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = 0
		p.From.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGLINK

		// MOV	4/8(SP), REGCTXT
		p = obj.Appendp(p, c.newprog)
		p.As = mov
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(c.ctxt.Arch.PtrSize)
		p.From.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGCTXT

		// ADD	$8/$16, SP
		p = obj.Appendp(p, c.newprog)
		p.As = add
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(frameSize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGSP
		p.Spadj = int32(-frameSize)

		p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
	}

	// Jump back to here after morestack returns.
	startPred := p

	// MOV	g_stackguard(g), R1
	p = obj.Appendp(p, c.newprog)

	p.As = mov
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0
	if c.cursym.CFunc() {
		p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R1

	// Mark the stack bound check and morestack call async nonpreemptible.
	// If we get preempted here, when resumed the preemption request is
	// cleared, but we'll still call morestack, which will double the stack
	// unnecessarily. See issue #35470.
	p = c.ctxt.StartUnsafePoint(p, c.newprog)

	var q *obj.Prog
	if framesize <= abi.StackSmall {
		// small stack: SP < stackguard
		//	AGTU	SP, stackguard, R1
		p = obj.Appendp(p, c.newprog)

		p.As = ASGTU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REGSP
		p.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R1
	} else {
		// large stack: SP-framesize < stackguard-StackSmall
		offset := int64(framesize) - abi.StackSmall
		if framesize > abi.StackBig {
			// Such a large stack we need to protect against underflow.
			// The runtime guarantees SP > objabi.StackBig, but
			// framesize is large enough that SP-framesize may
			// underflow, causing a direct comparison with the
			// stack guard to incorrectly succeed. We explicitly
			// guard against underflow.
			//
			//	SGTU	$(framesize-StackSmall), SP, R2
			//	BNE	R2, label-of-call-to-morestack

			p = obj.Appendp(p, c.newprog)
			p.As = ASGTU
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = offset
			p.Reg = REGSP
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_R2

			p = obj.Appendp(p, c.newprog)
			q = p
			p.As = ABNE
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_R2
			p.To.Type = obj.TYPE_BRANCH
			p.Mark |= BRANCH
		}

		// Check against the stack guard. We've ensured this won't underflow.
		//	ADD	$-(framesize-StackSmall), SP, R2
		//	SGTU	R2, stackguard, R1
		p = obj.Appendp(p, c.newprog)

		p.As = add
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = -offset
		p.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2

		p = obj.Appendp(p, c.newprog)
		p.As = ASGTU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R2
		p.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R1
	}

	// q1: BNE	R1, done
	p = obj.Appendp(p, c.newprog)
	q1 := p

	p.As = ABNE
	p.From.Type = obj.TYPE_REG
	p.From.Reg = REG_R1
	p.To.Type = obj.TYPE_BRANCH
	p.Mark |= BRANCH

	// MOV	LINK, R3
	p = obj.Appendp(p, c.newprog)

	p.As = mov
	p.From.Type = obj.TYPE_REG
	p.From.Reg = REGLINK
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R3
	if q != nil {
		// The underflow BNE above jumps here, to the morestack call.
		q.To.SetTarget(p)
		p.Mark |= LABEL
	}

	p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog)

	// JAL	runtime.morestack(SB)
	p = obj.Appendp(p, c.newprog)

	p.As = AJAL
	p.To.Type = obj.TYPE_BRANCH
	if c.cursym.CFunc() {
		p.To.Sym = c.ctxt.Lookup("runtime.morestackc")
	} else if !c.cursym.Func().Text.From.Sym.NeedCtxt() {
		p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt")
	} else {
		p.To.Sym = c.ctxt.Lookup("runtime.morestack")
	}
	p.Mark |= BRANCH

	p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)

	// JMP	start
	p = obj.Appendp(p, c.newprog)

	p.As = AJMP
	p.To.Type = obj.TYPE_BRANCH
	p.To.SetTarget(startPred.Link)
	startPred.Link.Mark |= LABEL
	p.Mark |= BRANCH

	// placeholder for q1's jump target
	p = obj.Appendp(p, c.newprog)

	p.As = obj.ANOP // zero-width place holder
	q1.To.SetTarget(p)

	return p
}

// addnop inserts a hardware NOP (ANOOP) immediately after p, typically to
// fill a branch delay slot.
func (c *ctxt0) addnop(p *obj.Prog) {
	q := c.newprog()
	q.As = ANOOP
	q.Pos = p.Pos
	q.Link = p.Link
	p.Link = q
}

// Dependency-class bits used by the scheduler to model non-register state
// (HI/LO, FP and machine control registers, and memory regions).
const (
	E_HILO  = 1 << 0
	E_FCR   = 1 << 1
	E_MCR   = 1 << 2
	E_MEM   = 1 << 3
	E_MEMSP = 1 << 4 /* uses offset and size */
	E_MEMSB = 1 << 5 /* uses offset and size */
	ANYMEM  = E_MEM | E_MEMSP | E_MEMSB
	//DELAY = LOAD|BRANCH|FCMP
	DELAY = BRANCH /* only schedule branch */
)

// Dep is a set of resources an instruction reads or writes: integer
// register bitmap, FP register bitmap, and E_* condition/memory bits.
type Dep struct {
	ireg uint32
	freg uint32
	cc   uint32
}

// Sch is the scheduler's side record for one instruction: a copy of the
// Prog plus its set/used resources, memory footprint (soffset/size), the
// number of NOPs to emit after it, and whether it expands to multiple
// machine instructions (comp).
type Sch struct {
	p       obj.Prog
	set     Dep
	used    Dep
	soffset int32
	size    uint8
	nop     uint8
	comp    bool
}

// sched performs delay-slot scheduling over the basic block p0..pe
// (inclusive): for each DELAY-marked instruction it tries to pull an
// earlier independent instruction down into the delay slot, otherwise it
// requests a NOP. The block is then written back over the original Progs.
// NOTE(review): the -cap(...) comparisons emulate pointer ordering of
// slices into sch (c2go translation artifact) — lower cap means later
// position.
func (c *ctxt0) sched(p0, pe *obj.Prog) {
	var sch [NSCHED]Sch

	/*
	 * build side structure
	 */
	s := sch[:]
	for p := p0; ; p = p.Link {
		s[0].p = *p
		c.markregused(&s[0])
		if p == pe {
			break
		}
		s = s[1:]
	}
	se := s

	for i := cap(sch) - cap(se); i >= 0; i-- {
		s = sch[i:]
		if s[0].p.Mark&DELAY == 0 {
			continue
		}
		if -cap(s) < -cap(se) {
			if !conflict(&s[0], &s[1]) {
				continue
			}
		}

		var t []Sch
		var j int
		for j = cap(sch) - cap(s) - 1; j >= 0; j-- {
			t = sch[j:]
			if t[0].comp {
				if s[0].p.Mark&BRANCH != 0 {
					continue
				}
			}
			if t[0].p.Mark&DELAY != 0 {
				if -cap(s) >= -cap(se) || conflict(&t[0], &s[1]) {
					continue
				}
			}
			// NOTE(review): this `continue` targets the inner u-loop, so the
			// depend() result is discarded and the loop is a no-op — looks
			// like a translation artifact; confirm against the upstream C/Go
			// source before relying on this path (it is dead while nosched
			// is hard-wired true in preprocess).
			for u := t[1:]; -cap(u) <= -cap(s); u = u[1:] {
				if c.depend(&u[0], &t[0]) {
					continue
				}
			}
			goto out2
		}

		// No candidate found: a branch gets an explicit NOP in its slot.
		if s[0].p.Mark&BRANCH != 0 {
			s[0].nop = 1
		}
		continue

	out2:
		// t[0] is the instruction being moved to fill the delay
		stmp := t[0]
		copy(t[:i-j], t[1:i-j+1])
		s[0] = stmp

		if t[i-j-1].p.Mark&BRANCH != 0 {
			// t[i-j] is being put into a branch delay slot
			// combine its Spadj with the branch instruction
			t[i-j-1].p.Spadj += t[i-j].p.Spadj
			t[i-j].p.Spadj = 0
		}

		i--
	}

	/*
	 * put it all back
	 */
	var p *obj.Prog
	var q *obj.Prog
	for s, p = sch[:], p0; -cap(s) <= -cap(se); s, p = s[1:], q {
		q = p.Link
		if q != s[0].p.Link {
			*p = s[0].p
			p.Link = q
		}
		for s[0].nop != 0 {
			s[0].nop--
			c.addnop(p)
		}
	}
}

// markregused fills in s.set/s.used/size/soffset/comp for the instruction
// s.p, classifying the resources it defines and references based on the
// opcode and the addressing classes of the from/to operands.
func (c *ctxt0) markregused(s *Sch) {
	p := &s.p
	s.comp = c.compound(p)
	s.nop = 0
	if s.comp {
		// Compound instructions expand via REGTMP.
		s.set.ireg |= 1 << (REGTMP - REG_R0)
		s.used.ireg |= 1 << (REGTMP - REG_R0)
	}

	ar := 0  /* dest is really reference */
	ad := 0  /* source/dest is really address */
	ld := 0  /* opcode is load instruction */
	sz := 20 /* size of load/store for overlap computation */

	/*
	 * flags based on opcode
	 */
	switch p.As {
	case obj.ATEXT:
		c.autosize = int32(p.To.Offset + 8)
		ad = 1

	case AJAL:
		r := p.Reg
		if r == 0 {
			r = REGLINK
		}
		s.set.ireg |= 1 << uint(r-REG_R0)
		ar = 1
		ad = 1

	case ABGEZAL,
		ABLTZAL:
		s.set.ireg |= 1 << (REGLINK - REG_R0)
		fallthrough
	case ABEQ,
		ABGEZ,
		ABGTZ,
		ABLEZ,
		ABLTZ,
		ABNE:
		ar = 1
		ad = 1

	case ABFPT,
		ABFPF:
		ad = 1
		s.used.cc |= E_FCR

	case ACMPEQD,
		ACMPEQF,
		ACMPGED,
		ACMPGEF,
		ACMPGTD,
		ACMPGTF:
		ar = 1
		s.set.cc |= E_FCR
		p.Mark |= FCMP

	case AJMP:
		ar = 1
		ad = 1

	case AMOVB,
		AMOVBU:
		sz = 1
		ld = 1

	case AMOVH,
		AMOVHU:
		sz = 2
		ld = 1

	case AMOVF,
		AMOVW,
		AMOVWL,
		AMOVWR:
		sz = 4
		ld = 1

	case AMOVD,
		AMOVV,
		AMOVVL,
		AMOVVR:
		sz = 8
		ld = 1

	case ADIV,
		ADIVU,
		AMUL,
		AMULU,
		AREM,
		AREMU,
		ADIVV,
		ADIVVU,
		AMULV,
		AMULVU,
		AREMV,
		AREMVU:
		s.set.cc = E_HILO
		fallthrough
	case AADD,
		AADDU,
		AADDV,
		AADDVU,
		AAND,
		ANOR,
		AOR,
		ASGT,
		ASGTU,
		ASLL,
		ASRA,
		ASRL,
		ASLLV,
		ASRAV,
		ASRLV,
		ASUB,
		ASUBU,
		ASUBV,
		ASUBVU,
		AXOR,

		AADDD,
		AADDF,
		AADDW,
		ASUBD,
		ASUBF,
		ASUBW,
		AMULF,
		AMULD,
		AMULW,
		ADIVF,
		ADIVD,
		ADIVW:
		// Two-operand form: the destination doubles as the second source.
		if p.Reg == 0 {
			if p.To.Type == obj.TYPE_REG {
				p.Reg = p.To.Reg
			}
			//if(p->reg == NREG)
			//	print("botch %P\n", p);
		}
	}

	/*
	 * flags based on 'to' field
	 */
	cls := int(p.To.Class)
	if cls == 0 {
		cls = c.aclass(&p.To) + 1
		p.To.Class = int8(cls)
	}
	cls--
	switch cls {
	default:
		fmt.Printf("unknown class %d %v\n", cls, p)

	case C_ZCON,
		C_SCON,
		C_ADD0CON,
		C_AND0CON,
		C_ADDCON,
		C_ANDCON,
		C_UCON,
		C_LCON,
		C_NONE,
		C_SBRA,
		C_LBRA,
		C_ADDR,
		C_TEXTSIZE:
		break

	case C_HI,
		C_LO:
		s.set.cc |= E_HILO

	case C_FCREG:
		s.set.cc |= E_FCR

	case C_MREG:
		s.set.cc |= E_MCR

	case C_ZOREG,
		C_SOREG,
		C_LOREG:
		// The base register is read even when the operand is a store target.
		cls = int(p.To.Reg)
		s.used.ireg |= 1 << uint(cls-REG_R0)
		if ad != 0 {
			break
		}
		s.size = uint8(sz)
		s.soffset = c.regoff(&p.To)

		m := uint32(ANYMEM)
		if cls == REGSB {
			m = E_MEMSB
		}
		if cls == REGSP {
			m = E_MEMSP
		}

		if ar != 0 {
			s.used.cc |= m
		} else {
			s.set.cc |= m
		}

	case C_SACON,
		C_LACON:
		s.used.ireg |= 1 << (REGSP - REG_R0)

	case C_SECON,
		C_LECON:
		s.used.ireg |= 1 << (REGSB - REG_R0)

	case C_REG:
		if ar != 0 {
			s.used.ireg |= 1 << uint(p.To.Reg-REG_R0)
		} else {
			s.set.ireg |= 1 << uint(p.To.Reg-REG_R0)
		}

	case C_FREG:
		if ar != 0 {
			s.used.freg |= 1 << uint(p.To.Reg-REG_F0)
		} else {
			s.set.freg |= 1 << uint(p.To.Reg-REG_F0)
		}
		if ld != 0 && p.From.Type == obj.TYPE_REG {
			p.Mark |= LOAD
		}

	case C_SAUTO,
		C_LAUTO:
		s.used.ireg |= 1 << (REGSP - REG_R0)
		if ad != 0 {
			break
		}
		s.size = uint8(sz)
		s.soffset = c.regoff(&p.To)

		if ar != 0 {
			s.used.cc |= E_MEMSP
		} else {
			s.set.cc |= E_MEMSP
		}

	case C_SEXT,
		C_LEXT:
		s.used.ireg |= 1 << (REGSB - REG_R0)
		if ad != 0 {
			break
		}
		s.size = uint8(sz)
		s.soffset = c.regoff(&p.To)

		if ar != 0 {
			s.used.cc |= E_MEMSB
		} else {
			s.set.cc |= E_MEMSB
		}
	}

	/*
	 * flags based on 'from' field
	 */
	cls = int(p.From.Class)
	if cls == 0 {
		cls = c.aclass(&p.From) + 1
		p.From.Class = int8(cls)
	}
	cls--
	switch cls {
	default:
		fmt.Printf("unknown class %d %v\n", cls, p)

	case C_ZCON,
		C_SCON,
		C_ADD0CON,
		C_AND0CON,
		C_ADDCON,
		C_ANDCON,
		C_UCON,
		C_LCON,
		C_NONE,
		C_SBRA,
		C_LBRA,
		C_ADDR,
		C_TEXTSIZE:
		break

	case C_HI,
		C_LO:
		s.used.cc |= E_HILO

	case C_FCREG:
		s.used.cc |= E_FCR

	case C_MREG:
		s.used.cc |= E_MCR

	case C_ZOREG,
		C_SOREG,
		C_LOREG:
		cls = int(p.From.Reg)
		s.used.ireg |= 1 << uint(cls-REG_R0)
		if ld != 0 {
			p.Mark |= LOAD
		}
		s.size = uint8(sz)
		s.soffset = c.regoff(&p.From)

		m := uint32(ANYMEM)
		if cls == REGSB {
			m = E_MEMSB
		}
		if cls == REGSP {
			m = E_MEMSP
		}

		s.used.cc |= m

	case C_SACON,
		C_LACON:
		cls = int(p.From.Reg)
		if cls == 0 {
			cls = REGSP
		}
		s.used.ireg |= 1 << uint(cls-REG_R0)

	case C_SECON,
		C_LECON:
		s.used.ireg |= 1 << (REGSB - REG_R0)

	case C_REG:
		s.used.ireg |= 1 << uint(p.From.Reg-REG_R0)

	case C_FREG:
		s.used.freg |= 1 << uint(p.From.Reg-REG_F0)
		if ld != 0 && p.To.Type == obj.TYPE_REG {
			p.Mark |= LOAD
		}

	case C_SAUTO,
		C_LAUTO:
		s.used.ireg |= 1 << (REGSP - REG_R0)
		if ld != 0 {
			p.Mark |= LOAD
		}
		if ad != 0 {
			break
		}
		s.size = uint8(sz)
		s.soffset = c.regoff(&p.From)

		s.used.cc |= E_MEMSP

	// NOTE(review): unlike the 'to' switch above, C_SEXT here is an empty
	// case (it does NOT fall through to C_LEXT in Go), so a C_SEXT source
	// records no usage — possibly a translation artifact; confirm against
	// upstream before changing.
	case C_SEXT:
	case C_LEXT:
		s.used.ireg |= 1 << (REGSB - REG_R0)
		if ld != 0 {
			p.Mark |= LOAD
		}
		if ad != 0 {
			break
		}
		s.size = uint8(sz)
		s.soffset = c.regoff(&p.From)

		s.used.cc |= E_MEMSB
	}

	// Third operand (p.Reg) is always a read.
	cls = int(p.Reg)
	if cls != 0 {
		if REG_F0 <= cls && cls <= REG_F31 {
			s.used.freg |= 1 << uint(cls-REG_F0)
		} else {
			s.used.ireg |= 1 << uint(cls-REG_R0)
		}
	}
	s.set.ireg &^= (1 << (REGZERO - REG_R0)) /* R0 can't be set */
}

/*
 * test to see if two instructions can be
 * interchanged without changing semantics
 */
func (c *ctxt0) depend(sa, sb *Sch) bool {
	// Write-write or write-read overlap on integer registers.
	if sa.set.ireg&(sb.set.ireg|sb.used.ireg) != 0 {
		return true
	}
	if sb.set.ireg&sa.used.ireg != 0 {
		return true
	}

	// Same for floating-point registers.
	if sa.set.freg&(sb.set.freg|sb.used.freg) != 0 {
		return true
	}
	if sb.set.freg&sa.used.freg != 0 {
		return true
	}

	/*
	 * special case.
	 * loads from same address cannot pass.
	 * this is for hardware fifo's and the like
	 */
	if sa.used.cc&sb.used.cc&E_MEM != 0 {
		if sa.p.Reg == sb.p.Reg {
			if c.regoff(&sa.p.From) == c.regoff(&sb.p.From) {
				return true
			}
		}
	}

	x := (sa.set.cc & (sb.set.cc | sb.used.cc)) | (sb.set.cc & sa.used.cc)
	if x != 0 {
		/*
		 * allow SB and SP to pass each other.
		 * allow SB to pass SB iff doffsets are ok
		 * anything else conflicts
		 */
		if x != E_MEMSP && x != E_MEMSB {
			return true
		}
		x = sa.set.cc | sb.set.cc | sa.used.cc | sb.used.cc
		if x&E_MEM != 0 {
			return true
		}
		if offoverlap(sa, sb) {
			return true
		}
	}

	return false
}

// offoverlap reports whether the [soffset, soffset+size) memory ranges of
// sa and sb intersect.
func offoverlap(sa, sb *Sch) bool {
	if sa.soffset < sb.soffset {
		if sa.soffset+int32(sa.size) > sb.soffset {
			return true
		}
		return false
	}
	if sb.soffset+int32(sb.size) > sa.soffset {
		return true
	}
	return false
}

/*
 * test 2 adjacent instructions
 * and find out if inserted instructions
 * are desired to prevent stalls.
 */
func conflict(sa, sb *Sch) bool {
	if sa.set.ireg&sb.used.ireg != 0 {
		return true
	}
	if sa.set.freg&sb.used.freg != 0 {
		return true
	}
	if sa.set.cc&sb.used.cc != 0 {
		return true
	}
	return false
}

// compound reports whether p assembles to more than one machine
// instruction (size != 4) or writes REGSB, in which case the scheduler
// must treat it as using REGTMP.
func (c *ctxt0) compound(p *obj.Prog) bool {
	o := c.oplook(p)
	if o.size != 4 {
		return true
	}
	if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSB {
		return true
	}
	return false
}

// LinkArch descriptors wiring the four MIPS variants (64/32-bit, big/little
// endian) into the assembler framework.
var Linkmips64 = obj.LinkArch{
	Arch:           sys.ArchMIPS64,
	Init:           buildop,
	Preprocess:     preprocess,
	Assemble:       span0,
	Progedit:       progedit,
	DWARFRegisters: MIPSDWARFRegisters,
}

var Linkmips64le = obj.LinkArch{
	Arch:           sys.ArchMIPS64LE,
	Init:           buildop,
	Preprocess:     preprocess,
	Assemble:       span0,
	Progedit:       progedit,
	DWARFRegisters: MIPSDWARFRegisters,
}

var Linkmips = obj.LinkArch{
	Arch:           sys.ArchMIPS,
	Init:           buildop,
	Preprocess:     preprocess,
	Assemble:       span0,
	Progedit:       progedit,
	DWARFRegisters: MIPSDWARFRegisters,
}

var Linkmipsle = obj.LinkArch{
	Arch:           sys.ArchMIPSLE,
	Init:           buildop,
	Preprocess:     preprocess,
	Assemble:       span0,
	Progedit:       progedit,
	DWARFRegisters: MIPSDWARFRegisters,
}